diff --git a/docs/integration.md b/docs/integration.md
index 8140017fb3..cd4dbdd114 100644
--- a/docs/integration.md
+++ b/docs/integration.md
@@ -19,7 +19,7 @@ stellar-core generates several types of data that can be used by applications, d
 
 Full [Ledger](ledger.md) snapshots are available in both:
   * [history archives](history.md) (checkpoints, every 64 ledgers, updated every 5 minutes)
-  * in the case of captive-core (enabled via the `--in-memory` command line option) the ledger is maintained within the stellar-core process and ledger-state need to be tracked as it changes via "meta" updates.
+  * in the case of captive-core, the ledger is maintained within the stellar-core process and ledger state needs to be tracked as it changes via "meta" updates.
 
 ## Ledger State transition information (transactions, etc)
 
diff --git a/docs/quick-reference.md b/docs/quick-reference.md
index 24c76a6db5..56d4af2d9c 100644
--- a/docs/quick-reference.md
+++ b/docs/quick-reference.md
@@ -147,9 +147,8 @@ transactions or ledger states) must be downloaded and verified sequentially. It
 worthwhile to save and reuse such a trusted reference file multiple times before regenerating it.
 
 ##### Experimental fast "meta data generation"
-`catchup` has a command line flag `--in-memory` that when combined with the
-`METADATA_OUTPUT_STREAM` allows a stellar-core instance to stream meta data instead
-of using a database as intermediate store.
+The `catchup` command, when combined with `METADATA_OUTPUT_STREAM`, allows a
+stellar-core instance to stream meta data.
 
 This has been tested as being orders of magnitude faster for replaying large sections
 of history.
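+
+As a minimal sketch, assuming the `--metadata-output-stream` option (which
+mirrors the config entry) and using placeholder values — `100000/1000` is an
+arbitrary ledger range, and `fd:1` directs meta to standard output:
+
+```
+stellar-core catchup 100000/1000 --metadata-output-stream fd:1
+```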
@@ -157,17 +156,7 @@ of history.
 If you don't specify a stream, the command will just replay transactions in
 memory and throw away all meta. This can be useful for performance testing the
 transaction processing subsystem.
 
-The `--in-memory` flag is also supported by the `run` command, which can be used to
-run a lightweight, stateless validator or watcher node, and this can be combined with
-`METADATA_OUTPUT_STREAM` to stream network activity to another process.
-
-By default, such a stateless node in `run` mode will catch up to the network starting from the
-network's most recent checkpoint, but this behaviour can be further modified using two flags
-(that must be used together) called `--start-at-ledger <N>` and `--start-at-hash <HEXHASH>`. These
-cause the node to start with a fast in-memory catchup to ledger `N` with hash `HEXHASH`, and then
-replay ledgers forward to the current state of the network.
-
-A stateless and meta-streaming node can additionally be configured with
+A meta-streaming node can additionally be configured with
 `EXPERIMENTAL_PRECAUTION_DELAY_META=true` (if unspecified, the default is
 `false`).  If `EXPERIMENTAL_PRECAUTION_DELAY_META` is `true`, then the node will
 delay emitting meta for a ledger `<N>` until the _next_ ledger, `<N+1>`, closes.
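+
+For example, a watcher that streams meta with this precaution enabled might
+carry the following config entries (the stream target here is illustrative):
+
+```
+METADATA_OUTPUT_STREAM="fd:1"
+EXPERIMENTAL_PRECAUTION_DELAY_META=true
+```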
diff --git a/docs/software/commands.md b/docs/software/commands.md
index ac51fe10f0..cc06917f25 100644
--- a/docs/software/commands.md
+++ b/docs/software/commands.md
@@ -159,13 +159,7 @@ apply.
   checkpoint from a history archive.
 * **run**: Runs stellar-core service.<br>
   Option **--wait-for-consensus** lets validators wait to hear from the network
-  before participating in consensus.<br>
-  (deprecated) Option **--in-memory** stores the current ledger in memory rather than a
-  database.<br>
-  (deprecated) Option **--start-at-ledger <N>** starts **--in-memory** mode with a catchup to
-  ledger **N** then replays to the current state of the network.<br>
-  (deprecated) Option **--start-at-hash <HASH>** provides a (mandatory) hash for the ledger
-  **N** specified by the **--start-at-ledger** option.
+  before participating in consensus.
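+  For example, `stellar-core run --wait-for-consensus` (an illustrative
+  invocation) starts a validator that listens to the network before voting.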
 * **sec-to-pub**:  Reads a secret key on standard input and outputs the
   corresponding public key.  Both keys are in Stellar's standard
   base-32 ASCII format.
diff --git a/docs/stellar-core_example.cfg b/docs/stellar-core_example.cfg
index 103c115cf4..873a7b955a 100644
--- a/docs/stellar-core_example.cfg
+++ b/docs/stellar-core_example.cfg
@@ -229,14 +229,6 @@ FLOOD_DEMAND_BACKOFF_DELAY_MS = 500
 #   against each other.
 MAX_DEX_TX_OPERATIONS_IN_TX_SET = 0
 
-# DEPRECATED_SQL_LEDGER_STATE (bool) default false
-# When set to true, SQL is used to store all ledger state instead of
-# BucketListDB. This is not recommended and may cause performance degregradation.
-# This is deprecated and will be removed in the future. Note that offers table
-# is still maintained in SQL when this is set to false, but all other ledger
-# state tables are dropped.
-DEPRECATED_SQL_LEDGER_STATE = false
-
 # BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT (Integer) default 14
 # Determines page size used by BucketListDB for range indexes, where
 # pageSize == 2^BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT. If set to
@@ -258,11 +250,6 @@ BUCKETLIST_DB_INDEX_CUTOFF = 20
 # this value is ignored and indexes are never persisted.
 BUCKETLIST_DB_PERSIST_INDEX = true
 
-# BACKGROUND_EVICTION_SCAN (bool) default true
-# Determines whether eviction scans occur in the background thread. Requires
-# that DEPRECATED_SQL_LEDGER_STATE is set to false.
-BACKGROUND_EVICTION_SCAN = true
-
 # EXPERIMENTAL_BACKGROUND_OVERLAY_PROCESSING (bool) default false
 # Determines whether some of overlay processing occurs in the background
 # thread.
@@ -601,17 +588,12 @@ MAX_SLOTS_TO_REMEMBER=12
 # only a passive "watcher" node.
 METADATA_OUTPUT_STREAM=""
 
-# Setting EXPERIMENTAL_PRECAUTION_DELAY_META to true causes a stateless node
+# Setting EXPERIMENTAL_PRECAUTION_DELAY_META to true causes a node
 # which is streaming meta to delay streaming the meta for a given ledger until
 # it closes the next ledger. This ensures that if a local bug had corrupted the
 # given ledger, then the meta for the corrupted ledger will never be emitted, as
 # the node will not be able to reach consensus with the network on the next
 # ledger.
-#
-# Setting EXPERIMENTAL_PRECAUTION_DELAY_META to true in combination with a
-# non-empty METADATA_OUTPUT_STREAM (which can be configured on the command line
-# as well as in the config file) requires an in-memory database (specified by
-# using --in-memory on the command line).
 EXPERIMENTAL_PRECAUTION_DELAY_META=false
 
 # Number of ledgers worth of transaction metadata to preserve on disk for
diff --git a/docs/stellar-core_example_validators.cfg b/docs/stellar-core_example_validators.cfg
index 10d6ced3ee..a1203e1047 100644
--- a/docs/stellar-core_example_validators.cfg
+++ b/docs/stellar-core_example_validators.cfg
@@ -4,7 +4,6 @@ PUBLIC_HTTP_PORT=false
 NETWORK_PASSPHRASE="Example configuration"
 
 DATABASE="sqlite3://example.db"
-DEPRECATED_SQL_LEDGER_STATE = false
 
 NODE_SEED="SA7FGJMMUIHNE3ZPI2UO5I632A7O5FBAZTXFAIEVFA4DSSGLHXACLAIT a3"
 NODE_HOME_DOMAIN="domainA"
diff --git a/docs/stellar-core_standalone.cfg b/docs/stellar-core_standalone.cfg
index b9fd80a509..858e97d002 100644
--- a/docs/stellar-core_standalone.cfg
+++ b/docs/stellar-core_standalone.cfg
@@ -12,7 +12,6 @@ NODE_IS_VALIDATOR=true
 
 #DATABASE="postgresql://dbname=stellar user=postgres password=password host=localhost"
 DATABASE="sqlite3://stellar.db"
-DEPRECATED_SQL_LEDGER_STATE = false
 
 COMMANDS=["ll?level=debug"]
 
diff --git a/docs/stellar-core_testnet.cfg b/docs/stellar-core_testnet.cfg
index 77c834eb62..981105b7a6 100644
--- a/docs/stellar-core_testnet.cfg
+++ b/docs/stellar-core_testnet.cfg
@@ -4,7 +4,6 @@ PUBLIC_HTTP_PORT=false
 NETWORK_PASSPHRASE="Test SDF Network ; September 2015"
 
 DATABASE="sqlite3://stellar.db"
-DEPRECATED_SQL_LEDGER_STATE = false
 
 # Stellar Testnet validators
 [[HOME_DOMAINS]]
diff --git a/docs/stellar-core_testnet_legacy.cfg b/docs/stellar-core_testnet_legacy.cfg
index 946e7c8bc9..0ff9909c9f 100644
--- a/docs/stellar-core_testnet_legacy.cfg
+++ b/docs/stellar-core_testnet_legacy.cfg
@@ -9,7 +9,6 @@ KNOWN_PEERS=[
 "core-testnet3.stellar.org"]
 
 DATABASE="sqlite3://stellar.db"
-DEPRECATED_SQL_LEDGER_STATE = false
 UNSAFE_QUORUM=true
 FAILURE_SAFETY=1
 
diff --git a/docs/stellar-core_testnet_validator.cfg b/docs/stellar-core_testnet_validator.cfg
index c5d5768e87..fa329c0c43 100644
--- a/docs/stellar-core_testnet_validator.cfg
+++ b/docs/stellar-core_testnet_validator.cfg
@@ -4,7 +4,6 @@ PUBLIC_HTTP_PORT=false
 NETWORK_PASSPHRASE="Test SDF Network ; September 2015"
 
 DATABASE="sqlite3://stellar.db"
-DEPRECATED_SQL_LEDGER_STATE = false
 
 # Configuring the node as a validator
 # note that this is an unsafe configuration in this particular setup:
diff --git a/src/bucket/Bucket.cpp b/src/bucket/Bucket.cpp
index 61e96b3a28..247a3a48e2 100644
--- a/src/bucket/Bucket.cpp
+++ b/src/bucket/Bucket.cpp
@@ -8,6 +8,7 @@
 #include "util/asio.h" // IWYU pragma: keep
 #include "bucket/Bucket.h"
 #include "bucket/BucketApplicator.h"
+#include "bucket/BucketInputIterator.h"
 #include "bucket/BucketList.h"
 #include "bucket/BucketListSnapshot.h"
 #include "bucket/BucketManager.h"
@@ -24,11 +25,13 @@
 #include "util/Fs.h"
 #include "util/GlobalChecks.h"
 #include "util/Logging.h"
+#include "util/ProtocolVersion.h"
 #include "util/XDRStream.h"
 #include "util/types.h"
 #include <Tracy.hpp>
 
 #include "medida/counter.h"
+#include "xdr/Stellar-ledger.h"
 
 namespace stellar
 {
@@ -97,10 +100,10 @@ Bucket::getSize() const
 }
 
 bool
-Bucket::containsBucketIdentity(BucketEntry const& id) const
+LiveBucket::containsBucketIdentity(BucketEntry const& id) const
 {
-    BucketEntryIdCmp cmp;
-    BucketInputIterator iter(shared_from_this());
+    BucketEntryIdCmp<LiveBucket> cmp;
+    LiveBucketInputIterator iter(shared_from_this());
     while (iter)
     {
         if (!(cmp(*iter, id) || cmp(id, *iter)))
@@ -132,19 +135,10 @@ Bucket::freeIndex()
 
 #ifdef BUILD_TESTS
 void
-Bucket::apply(Application& app) const
+LiveBucket::apply(Application& app) const
 {
     ZoneScoped;
 
-    auto filter = [&](LedgerEntryType t) {
-        if (app.getConfig().isUsingBucketListDB())
-        {
-            return t == OFFER;
-        }
-
-        return true;
-    };
-
     std::unordered_set<LedgerKey> emptySet;
     BucketApplicator applicator(
         app, app.getConfig().LEDGER_PROTOCOL_VERSION,
@@ -152,7 +146,7 @@ Bucket::apply(Application& app) const
         0 /*set to a level that's not the bottom so we don't treat live entries
              as init*/
         ,
-        shared_from_this(), filter, emptySet);
+        shared_from_this(), emptySet);
     BucketApplicator::Counters counters(app.getClock().now());
     while (applicator)
     {
@@ -163,10 +157,10 @@ Bucket::apply(Application& app) const
 #endif // BUILD_TESTS
 
 std::vector<BucketEntry>
-Bucket::convertToBucketEntry(bool useInit,
-                             std::vector<LedgerEntry> const& initEntries,
-                             std::vector<LedgerEntry> const& liveEntries,
-                             std::vector<LedgerKey> const& deadEntries)
+LiveBucket::convertToBucketEntry(bool useInit,
+                                 std::vector<LedgerEntry> const& initEntries,
+                                 std::vector<LedgerEntry> const& liveEntries,
+                                 std::vector<LedgerKey> const& deadEntries)
 {
     std::vector<BucketEntry> bucket;
     for (auto const& e : initEntries)
@@ -191,7 +185,7 @@ Bucket::convertToBucketEntry(bool useInit,
         bucket.push_back(ce);
     }
 
-    BucketEntryIdCmp cmp;
+    BucketEntryIdCmp<LiveBucket> cmp;
     std::sort(bucket.begin(), bucket.end(), cmp);
     releaseAssert(std::adjacent_find(
                       bucket.begin(), bucket.end(),
@@ -229,12 +223,83 @@ Bucket::randomBucketIndexName(std::string const& tmpDir)
     return randomFileName(tmpDir, ".index");
 }
 
-std::shared_ptr<Bucket>
-Bucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion,
-              std::vector<LedgerEntry> const& initEntries,
-              std::vector<LedgerEntry> const& liveEntries,
-              std::vector<LedgerKey> const& deadEntries, bool countMergeEvents,
-              asio::io_context& ctx, bool doFsync)
+std::vector<HotArchiveBucketEntry>
+HotArchiveBucket::convertToBucketEntry(
+    std::vector<LedgerEntry> const& archivedEntries,
+    std::vector<LedgerKey> const& restoredEntries,
+    std::vector<LedgerKey> const& deletedEntries)
+{
+    std::vector<HotArchiveBucketEntry> bucket;
+    for (auto const& e : archivedEntries)
+    {
+        HotArchiveBucketEntry be;
+        be.type(HOT_ARCHIVE_ARCHIVED);
+        be.archivedEntry() = e;
+        bucket.push_back(be);
+    }
+    for (auto const& k : restoredEntries)
+    {
+        HotArchiveBucketEntry be;
+        be.type(HOT_ARCHIVE_LIVE);
+        be.key() = k;
+        bucket.push_back(be);
+    }
+    for (auto const& k : deletedEntries)
+    {
+        HotArchiveBucketEntry be;
+        be.type(HOT_ARCHIVE_DELETED);
+        be.key() = k;
+        bucket.push_back(be);
+    }
+
+    BucketEntryIdCmp<HotArchiveBucket> cmp;
+    std::sort(bucket.begin(), bucket.end(), cmp);
+    releaseAssert(std::adjacent_find(bucket.begin(), bucket.end(),
+                                     [&cmp](HotArchiveBucketEntry const& lhs,
+                                            HotArchiveBucketEntry const& rhs) {
+                                         return !cmp(lhs, rhs);
+                                     }) == bucket.end());
+    return bucket;
+}
+
+std::shared_ptr<HotArchiveBucket>
+HotArchiveBucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion,
+                        std::vector<LedgerEntry> const& archivedEntries,
+                        std::vector<LedgerKey> const& restoredEntries,
+                        std::vector<LedgerKey> const& deletedEntries,
+                        bool countMergeEvents, asio::io_context& ctx,
+                        bool doFsync)
+{
+    ZoneScoped;
+    BucketMetadata meta;
+    meta.ledgerVersion = protocolVersion;
+    meta.ext.v(1);
+    meta.ext.bucketListType() = BucketListType::HOT_ARCHIVE;
+    auto entries =
+        convertToBucketEntry(archivedEntries, restoredEntries, deletedEntries);
+
+    MergeCounters mc;
+    HotArchiveBucketOutputIterator out(bucketManager.getTmpDir(), true, meta,
+                                       mc, ctx, doFsync);
+    for (auto const& e : entries)
+    {
+        out.put(e);
+    }
+
+    if (countMergeEvents)
+    {
+        bucketManager.incrMergeCounters(mc);
+    }
+
+    return out.getBucket(bucketManager);
+}
+
+std::shared_ptr<LiveBucket>
+LiveBucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion,
+                  std::vector<LedgerEntry> const& initEntries,
+                  std::vector<LedgerEntry> const& liveEntries,
+                  std::vector<LedgerKey> const& deadEntries,
+                  bool countMergeEvents, asio::io_context& ctx, bool doFsync)
 {
     ZoneScoped;
     // When building fresh buckets after protocol version 10 (i.e. version
@@ -245,12 +310,21 @@ Bucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion,
 
     BucketMetadata meta;
     meta.ledgerVersion = protocolVersion;
+
+    if (protocolVersionStartsFrom(
+            protocolVersion,
+            Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION))
+    {
+        meta.ext.v(1);
+        meta.ext.bucketListType() = BucketListType::LIVE;
+    }
+
     auto entries =
         convertToBucketEntry(useInit, initEntries, liveEntries, deadEntries);
 
     MergeCounters mc;
-    BucketOutputIterator out(bucketManager.getTmpDir(), true, meta, mc, ctx,
-                             doFsync);
+    LiveBucketOutputIterator out(bucketManager.getTmpDir(), true, meta, mc, ctx,
+                                 doFsync);
     for (auto const& e : entries)
     {
         out.put(e);
@@ -261,8 +335,7 @@ Bucket::fresh(BucketManager& bucketManager, uint32_t protocolVersion,
         bucketManager.incrMergeCounters(mc);
     }
 
-    return out.getBucket(bucketManager,
-                         bucketManager.getConfig().isUsingBucketListDB());
+    return out.getBucket(bucketManager);
 }
 
 static void
@@ -286,8 +359,8 @@ countShadowedEntryType(MergeCounters& mc, BucketEntry const& e)
 }
 
 void
-Bucket::checkProtocolLegality(BucketEntry const& entry,
-                              uint32_t protocolVersion)
+LiveBucket::checkProtocolLegality(BucketEntry const& entry,
+                                  uint32_t protocolVersion)
 {
     if (protocolVersionIsBefore(
             protocolVersion,
@@ -301,8 +374,19 @@ Bucket::checkProtocolLegality(BucketEntry const& entry,
 }
 
 inline void
-maybePut(BucketOutputIterator& out, BucketEntry const& entry,
-         std::vector<BucketInputIterator>& shadowIterators,
+maybePut(HotArchiveBucketOutputIterator& out,
+         HotArchiveBucketEntry const& entry,
+         std::vector<HotArchiveBucketInputIterator>& shadowIterators,
+         bool keepShadowedLifecycleEntries, MergeCounters& mc)
+{
+    // Archived BucketList is only present after protocol 21, so shadows are
+    // never supported
+    out.put(entry);
+}
+
+inline void
+maybePut(LiveBucketOutputIterator& out, BucketEntry const& entry,
+         std::vector<LiveBucketInputIterator>& shadowIterators,
          bool keepShadowedLifecycleEntries, MergeCounters& mc)
 {
     // In ledgers before protocol 11, keepShadowedLifecycleEntries will be
@@ -340,8 +424,8 @@ maybePut(BucketOutputIterator& out, BucketEntry const& entry,
     // Note that this decision only controls whether to elide dead entries due
     // to _shadows_. There is a secondary elision of dead entries at the _oldest
     // level_ of the bucketlist that is accomplished through filtering at the
-    // BucketOutputIterator level, and happens independent of ledger protocol
-    // version.
+    // LiveBucketOutputIterator level, and happens independent of ledger
+    // protocol version.
 
     if (keepShadowedLifecycleEntries &&
         (entry.type() == INITENTRY || entry.type() == DEADENTRY))
@@ -351,7 +435,7 @@ maybePut(BucketOutputIterator& out, BucketEntry const& entry,
         return;
     }
 
-    BucketEntryIdCmp cmp;
+    BucketEntryIdCmp<LiveBucket> cmp;
     for (auto& si : shadowIterators)
     {
         // Advance the shadowIterator while it's less than the candidate
@@ -447,11 +531,13 @@ countNewEntryType(MergeCounters& mc, BucketEntry const& e)
 // and shadowing protocol simultaneously, the moment the first new-protocol
 // bucket enters the youngest level. At least one new bucket is in every merge's
 // shadows from then on in, so they all upgrade (and preserve lifecycle events).
+template <class BucketT>
 static void
 calculateMergeProtocolVersion(
     MergeCounters& mc, uint32_t maxProtocolVersion,
-    BucketInputIterator const& oi, BucketInputIterator const& ni,
-    std::vector<BucketInputIterator> const& shadowIterators,
+    BucketInputIterator<BucketT> const& oi,
+    BucketInputIterator<BucketT> const& ni,
+    std::vector<BucketInputIterator<BucketT>> const& shadowIterators,
     uint32& protocolVersion, bool& keepShadowedLifecycleEntries)
 {
     protocolVersion = std::max(oi.getMetadata().ledgerVersion,
@@ -466,7 +552,7 @@ calculateMergeProtocolVersion(
     {
         auto version = si.getMetadata().ledgerVersion;
         if (protocolVersionIsBefore(version,
-                                    Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
+                                    LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
         {
             protocolVersion = std::max(version, protocolVersion);
         }
@@ -488,9 +574,16 @@ calculateMergeProtocolVersion(
     // support annihilation of INITENTRY and DEADENTRY pairs. See commentary
     // above in `maybePut`.
     keepShadowedLifecycleEntries = true;
+
+    // Don't count shadow metrics for Hot Archive BucketList
+    if constexpr (std::is_same_v<BucketT, HotArchiveBucket>)
+    {
+        return;
+    }
+
     if (protocolVersionIsBefore(
             protocolVersion,
-            Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY))
+            LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY))
     {
         ++mc.mPreInitEntryProtocolMerges;
         keepShadowedLifecycleEntries = false;
@@ -501,7 +594,7 @@ calculateMergeProtocolVersion(
     }
 
     if (protocolVersionIsBefore(protocolVersion,
-                                Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
+                                LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
     {
         ++mc.mPreShadowRemovalProtocolMerges;
     }
@@ -519,13 +612,18 @@ calculateMergeProtocolVersion(
 // side, or entries that compare non-equal. In all these cases we just
 // take the lesser (or existing) entry and advance only one iterator,
 // not scrutinizing the entry type further.
+template <class BucketT>
 static bool
 mergeCasesWithDefaultAcceptance(
-    BucketEntryIdCmp const& cmp, MergeCounters& mc, BucketInputIterator& oi,
-    BucketInputIterator& ni, BucketOutputIterator& out,
-    std::vector<BucketInputIterator>& shadowIterators, uint32_t protocolVersion,
-    bool keepShadowedLifecycleEntries)
+    BucketEntryIdCmp<BucketT> const& cmp, MergeCounters& mc,
+    BucketInputIterator<BucketT>& oi, BucketInputIterator<BucketT>& ni,
+    BucketOutputIterator<BucketT>& out,
+    std::vector<BucketInputIterator<BucketT>>& shadowIterators,
+    uint32_t protocolVersion, bool keepShadowedLifecycleEntries)
 {
+    static_assert(std::is_same_v<BucketT, LiveBucket> ||
+                  std::is_same_v<BucketT, HotArchiveBucket>);
+
     if (!ni || (oi && ni && cmp(*oi, *ni)))
     {
         // Either of:
@@ -535,8 +633,11 @@ mergeCasesWithDefaultAcceptance(
         //
         // In both cases: take old entry.
         ++mc.mOldEntriesDefaultAccepted;
-        Bucket::checkProtocolLegality(*oi, protocolVersion);
-        countOldEntryType(mc, *oi);
+        if constexpr (std::is_same_v<BucketT, LiveBucket>)
+        {
+            LiveBucket::checkProtocolLegality(*oi, protocolVersion);
+            countOldEntryType(mc, *oi);
+        }
         maybePut(out, *oi, shadowIterators, keepShadowedLifecycleEntries, mc);
         ++oi;
         return true;
@@ -550,8 +651,11 @@ mergeCasesWithDefaultAcceptance(
         //
         // In both cases: take new entry.
         ++mc.mNewEntriesDefaultAccepted;
-        Bucket::checkProtocolLegality(*ni, protocolVersion);
-        countNewEntryType(mc, *ni);
+        if constexpr (std::is_same_v<BucketT, LiveBucket>)
+        {
+            LiveBucket::checkProtocolLegality(*ni, protocolVersion);
+            countNewEntryType(mc, *ni);
+        }
         maybePut(out, *ni, shadowIterators, keepShadowedLifecycleEntries, mc);
         ++ni;
         return true;
@@ -562,9 +666,33 @@ mergeCasesWithDefaultAcceptance(
 // The remaining cases happen when keys are equal and we have to reason
 // through the relationships of their bucket lifecycle states. Trickier.
 static void
-mergeCasesWithEqualKeys(MergeCounters& mc, BucketInputIterator& oi,
-                        BucketInputIterator& ni, BucketOutputIterator& out,
-                        std::vector<BucketInputIterator>& shadowIterators,
+mergeCasesWithEqualKeys(
+    MergeCounters& mc, HotArchiveBucketInputIterator& oi,
+    HotArchiveBucketInputIterator& ni, HotArchiveBucketOutputIterator& out,
+    std::vector<HotArchiveBucketInputIterator>& shadowIterators,
+    uint32_t protocolVersion, bool keepShadowedLifecycleEntries)
+{
+    // If two identical keys have the same type, throw an error. Otherwise,
+    // take the newer key.
+    HotArchiveBucketEntry const& oldEntry = *oi;
+    HotArchiveBucketEntry const& newEntry = *ni;
+    if (oldEntry.type() == newEntry.type())
+    {
+        throw std::runtime_error(
+            "Malformed Hot Archive bucket: two identical keys with "
+            "the same type.");
+    }
+
+    out.put(newEntry);
+    ++ni;
+    ++oi;
+}
+
+static void
+mergeCasesWithEqualKeys(MergeCounters& mc, LiveBucketInputIterator& oi,
+                        LiveBucketInputIterator& ni,
+                        LiveBucketOutputIterator& out,
+                        std::vector<LiveBucketInputIterator>& shadowIterators,
                         uint32_t protocolVersion,
                         bool keepShadowedLifecycleEntries)
 {
@@ -633,8 +761,8 @@ mergeCasesWithEqualKeys(MergeCounters& mc, BucketInputIterator& oi,
 
     BucketEntry const& oldEntry = *oi;
     BucketEntry const& newEntry = *ni;
-    Bucket::checkProtocolLegality(oldEntry, protocolVersion);
-    Bucket::checkProtocolLegality(newEntry, protocolVersion);
+    LiveBucket::checkProtocolLegality(oldEntry, protocolVersion);
+    LiveBucket::checkProtocolLegality(newEntry, protocolVersion);
     countOldEntryType(mc, oldEntry);
     countNewEntryType(mc, newEntry);
 
@@ -684,107 +812,18 @@ mergeCasesWithEqualKeys(MergeCounters& mc, BucketInputIterator& oi,
     ++ni;
 }
 
-bool
-Bucket::scanForEvictionLegacy(AbstractLedgerTxn& ltx, EvictionIterator& iter,
-                              uint32_t& bytesToScan,
-                              uint32_t& remainingEntriesToEvict,
-                              uint32_t ledgerSeq,
-                              medida::Counter& entriesEvictedCounter,
-                              medida::Counter& bytesScannedForEvictionCounter,
-                              std::shared_ptr<EvictionStatistics> stats) const
-{
-    ZoneScoped;
-    releaseAssert(stats);
-
-    if (isEmpty() ||
-        protocolVersionIsBefore(getBucketVersion(shared_from_this()),
-                                SOROBAN_PROTOCOL_VERSION))
-    {
-        // EOF, skip to next bucket
-        return false;
-    }
-
-    if (remainingEntriesToEvict == 0 || bytesToScan == 0)
-    {
-        // Reached end of scan region
-        return true;
-    }
-
-    XDRInputFileStream stream{};
-    stream.open(mFilename.string());
-    stream.seek(iter.bucketFileOffset);
-
-    BucketEntry be;
-    while (stream.readOne(be))
-    {
-        if (be.type() == INITENTRY || be.type() == LIVEENTRY)
-        {
-            auto const& le = be.liveEntry();
-            if (isTemporaryEntry(le.data))
-            {
-                ZoneNamedN(maybeEvict, "maybe evict entry", true);
-
-                auto ttlKey = getTTLKey(le);
-                uint32_t liveUntilLedger = 0;
-                auto shouldEvict = [&] {
-                    auto ttlLtxe = ltx.loadWithoutRecord(ttlKey);
-                    if (!ttlLtxe)
-                    {
-                        // Entry was already deleted either manually or by an
-                        // earlier eviction scan, do nothing
-                        return false;
-                    }
-
-                    releaseAssert(ttlLtxe);
-                    liveUntilLedger =
-                        ttlLtxe.current().data.ttl().liveUntilLedgerSeq;
-                    return !isLive(ttlLtxe.current(), ledgerSeq);
-                };
-
-                if (shouldEvict())
-                {
-                    ZoneNamedN(evict, "evict entry", true);
-                    auto age = ledgerSeq - liveUntilLedger;
-                    stats->recordEvictedEntry(age);
-
-                    ltx.erase(ttlKey);
-                    ltx.erase(LedgerEntryKey(le));
-                    entriesEvictedCounter.inc();
-                    --remainingEntriesToEvict;
-                }
-            }
-        }
-
-        auto newPos = stream.pos();
-        auto bytesRead = newPos - iter.bucketFileOffset;
-        iter.bucketFileOffset = newPos;
-        bytesScannedForEvictionCounter.inc(bytesRead);
-        if (bytesRead >= bytesToScan)
-        {
-            // Reached end of scan region
-            bytesToScan = 0;
-            return true;
-        }
-        else if (remainingEntriesToEvict == 0)
-        {
-            return true;
-        }
-
-        bytesToScan -= bytesRead;
-    }
-
-    // Hit eof
-    return false;
-}
-
-std::shared_ptr<Bucket>
+template <class BucketT>
+std::shared_ptr<BucketT>
 Bucket::merge(BucketManager& bucketManager, uint32_t maxProtocolVersion,
-              std::shared_ptr<Bucket> const& oldBucket,
-              std::shared_ptr<Bucket> const& newBucket,
-              std::vector<std::shared_ptr<Bucket>> const& shadows,
-              bool keepDeadEntries, bool countMergeEvents,
+              std::shared_ptr<BucketT> const& oldBucket,
+              std::shared_ptr<BucketT> const& newBucket,
+              std::vector<std::shared_ptr<BucketT>> const& shadows,
+              bool keepTombstoneEntries, bool countMergeEvents,
               asio::io_context& ctx, bool doFsync)
 {
+    static_assert(std::is_same_v<BucketT, LiveBucket> ||
+                  std::is_same_v<BucketT, HotArchiveBucket>);
+
     ZoneScoped;
     // This is the key operation in the scheme: merging two (read-only)
     // buckets together into a new 3rd bucket, while calculating its hash,
@@ -794,24 +833,43 @@ Bucket::merge(BucketManager& bucketManager, uint32_t maxProtocolVersion,
     releaseAssert(newBucket);
 
     MergeCounters mc;
-    BucketInputIterator oi(oldBucket);
-    BucketInputIterator ni(newBucket);
-    std::vector<BucketInputIterator> shadowIterators(shadows.begin(),
-                                                     shadows.end());
+    BucketInputIterator<BucketT> oi(oldBucket);
+    BucketInputIterator<BucketT> ni(newBucket);
+    std::vector<BucketInputIterator<BucketT>> shadowIterators(shadows.begin(),
+                                                              shadows.end());
 
     uint32_t protocolVersion;
     bool keepShadowedLifecycleEntries;
-    calculateMergeProtocolVersion(mc, maxProtocolVersion, oi, ni,
-                                  shadowIterators, protocolVersion,
-                                  keepShadowedLifecycleEntries);
+    calculateMergeProtocolVersion<BucketT>(mc, maxProtocolVersion, oi, ni,
+                                           shadowIterators, protocolVersion,
+                                           keepShadowedLifecycleEntries);
 
     auto timer = bucketManager.getMergeTimer().TimeScope();
     BucketMetadata meta;
     meta.ledgerVersion = protocolVersion;
-    BucketOutputIterator out(bucketManager.getTmpDir(), keepDeadEntries, meta,
-                             mc, ctx, doFsync);
 
-    BucketEntryIdCmp cmp;
+    // If any inputs use the new extension of BucketMeta, the output should as
+    // well
+    if (ni.getMetadata().ext.v() == 1)
+    {
+        releaseAssertOrThrow(protocolVersionStartsFrom(
+            maxProtocolVersion,
+            Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION));
+        meta.ext = ni.getMetadata().ext;
+    }
+    else if (oi.getMetadata().ext.v() == 1)
+    {
+        releaseAssertOrThrow(protocolVersionStartsFrom(
+            maxProtocolVersion,
+            Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION));
+        meta.ext = oi.getMetadata().ext;
+    }
+
+    BucketOutputIterator<BucketT> out(bucketManager.getTmpDir(),
+                                      keepTombstoneEntries, meta, mc, ctx,
+                                      doFsync);
+
+    BucketEntryIdCmp<BucketT> cmp;
     size_t iter = 0;
 
     while (oi || ni)
@@ -843,34 +901,91 @@ Bucket::merge(BucketManager& bucketManager, uint32_t maxProtocolVersion,
     {
         bucketManager.incrMergeCounters(mc);
     }
-    MergeKey mk{keepDeadEntries, oldBucket, newBucket, shadows};
-    return out.getBucket(bucketManager,
-                         bucketManager.getConfig().isUsingBucketListDB(), &mk);
+
+    std::vector<Hash> shadowHashes;
+    shadowHashes.reserve(shadows.size());
+    for (auto const& s : shadows)
+    {
+        shadowHashes.push_back(s->getHash());
+    }
+
+    MergeKey mk{keepTombstoneEntries, oldBucket->getHash(),
+                newBucket->getHash(), shadowHashes};
+    return out.getBucket(bucketManager, &mk);
+}
+
+LiveBucket::LiveBucket(std::string const& filename, Hash const& hash,
+                       std::unique_ptr<BucketIndex const>&& index)
+    : Bucket(filename, hash, std::move(index))
+{
+}
+
+LiveBucket::LiveBucket() : Bucket()
+{
 }
 
 uint32_t
-Bucket::getBucketVersion(std::shared_ptr<Bucket> const& bucket)
+LiveBucket::getBucketVersion() const
 {
-    releaseAssert(bucket);
-    BucketInputIterator it(bucket);
+    LiveBucketInputIterator it(shared_from_this());
     return it.getMetadata().ledgerVersion;
 }
 
 uint32_t
-Bucket::getBucketVersion(std::shared_ptr<Bucket const> const& bucket)
+HotArchiveBucket::getBucketVersion() const
 {
-    releaseAssert(bucket);
-    BucketInputIterator it(bucket);
+    HotArchiveBucketInputIterator it(shared_from_this());
+    return it.getMetadata().ledgerVersion;
+}
+
+uint32_t
+ColdArchiveBucket::getBucketVersion() const
+{
+    ColdArchiveBucketInputIterator it(shared_from_this());
     return it.getMetadata().ledgerVersion;
 }
 
 BucketEntryCounters const&
-Bucket::getBucketEntryCounters() const
+LiveBucket::getBucketEntryCounters() const
 {
     releaseAssert(mIndex);
     return mIndex->getBucketEntryCounters();
 }
 
+HotArchiveBucket::HotArchiveBucket(std::string const& filename,
+                                   Hash const& hash,
+                                   std::unique_ptr<BucketIndex const>&& index)
+    : Bucket(filename, hash, std::move(index))
+{
+}
+
+ColdArchiveBucket::ColdArchiveBucket(std::string const& filename,
+                                     Hash const& hash,
+                                     std::unique_ptr<BucketIndex const>&& index)
+    : Bucket(filename, hash, std::move(index))
+{
+}
+
+HotArchiveBucket::HotArchiveBucket() : Bucket()
+{
+}
+
+ColdArchiveBucket::ColdArchiveBucket() : Bucket()
+{
+}
+
+bool
+LiveBucket::isTombstoneEntry(BucketEntry const& e)
+{
+    return e.type() == DEADENTRY;
+}
+
+bool
+HotArchiveBucket::isTombstoneEntry(HotArchiveBucketEntry const& e)
+{
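+    // A HOT_ARCHIVE_LIVE entry records that a key has been restored to the
+    // live BucketList, so at the bottom level it carries no information and
+    // can be dropped: it plays the role that DEADENTRY plays in the live
+    // BucketList.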
+    return e.type() == HOT_ARCHIVE_LIVE;
+}
+
 BucketEntryCounters&
 BucketEntryCounters::operator+=(BucketEntryCounters const& other)
 {
@@ -897,4 +1012,20 @@ BucketEntryCounters::operator!=(BucketEntryCounters const& other) const
 {
     return !(*this == other);
 }
+
+template std::shared_ptr<LiveBucket> Bucket::merge<LiveBucket>(
+    BucketManager& bucketManager, uint32_t maxProtocolVersion,
+    std::shared_ptr<LiveBucket> const& oldBucket,
+    std::shared_ptr<LiveBucket> const& newBucket,
+    std::vector<std::shared_ptr<LiveBucket>> const& shadows,
+    bool keepTombstoneEntries, bool countMergeEvents, asio::io_context& ctx,
+    bool doFsync);
+
+template std::shared_ptr<HotArchiveBucket> Bucket::merge<HotArchiveBucket>(
+    BucketManager& bucketManager, uint32_t maxProtocolVersion,
+    std::shared_ptr<HotArchiveBucket> const& oldBucket,
+    std::shared_ptr<HotArchiveBucket> const& newBucket,
+    std::vector<std::shared_ptr<HotArchiveBucket>> const& shadows,
+    bool keepTombstoneEntries, bool countMergeEvents, asio::io_context& ctx,
+    bool doFsync);
 }
\ No newline at end of file
diff --git a/src/bucket/Bucket.h b/src/bucket/Bucket.h
index c4b6773949..7479c8ff6d 100644
--- a/src/bucket/Bucket.h
+++ b/src/bucket/Bucket.h
@@ -5,6 +5,7 @@
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
 
 #include "bucket/BucketIndex.h"
+#include "bucket/BucketSnapshot.h"
 #include "util/NonCopyable.h"
 #include "util/ProtocolVersion.h"
 #include "xdr/Stellar-ledger.h"
@@ -36,19 +37,28 @@ namespace stellar
  * Two buckets can be merged together efficiently (in a single pass): elements
  * from the newer bucket overwrite elements from the older bucket, the rest are
  * merged in sorted order, and all elements are hashed while being added.
+ *
+ * Different types of BucketList vary in the type of entries they contain and,
+ * by extension, in the merge logic for those entries. Additionally, some
+ * types of BucketList may have special operations only relevant to that
+ * specific type. This pure virtual base class provides the core functionality
+ * of a BucketList container and must be extended for each specific BucketList
+ * type. In particular, the fresh and merge functions must be defined for the
+ * specific type, while other functionality can be shared.
  */
 
 class AbstractLedgerTxn;
 class Application;
 class BucketManager;
-class SearchableBucketListSnapshot;
 struct EvictionResultEntry;
 class EvictionStatistics;
 struct BucketEntryCounters;
+template <class BucketT> class SearchableBucketListSnapshot;
+enum class LedgerEntryTypeAndDurability : uint32_t;
 
-class Bucket : public std::enable_shared_from_this<Bucket>,
-               public NonMovableOrCopyable
+class Bucket : public NonMovableOrCopyable
 {
+  protected:
     std::filesystem::path const mFilename;
     Hash const mHash;
     size_t mSize{0};
@@ -62,6 +72,9 @@ class Bucket : public std::enable_shared_from_this<Bucket>,
                                       std::string ext);
 
   public:
+    static constexpr ProtocolVersion
+        FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION = ProtocolVersion::V_23;
+
     // Create an empty bucket. The empty bucket has hash '000000...' and its
     // filename is the empty string.
     Bucket();
@@ -76,10 +89,6 @@ class Bucket : public std::enable_shared_from_this<Bucket>,
     std::filesystem::path const& getFilename() const;
     size_t getSize() const;
 
-    // Returns true if a BucketEntry that is key-wise identical to the given
-    // BucketEntry exists in the bucket. For testing.
-    bool containsBucketIdentity(BucketEntry const& id) const;
-
     bool isEmpty() const;
 
     // Delete index and close file stream
@@ -96,6 +105,62 @@ class Bucket : public std::enable_shared_from_this<Bucket>,
     // Sets index, throws if index is already set
     void setIndex(std::unique_ptr<BucketIndex const>&& index);
 
+    // Merge two buckets together, producing a fresh one. Entries in `oldBucket`
+    // are overridden in the fresh bucket by keywise-equal entries in
+    // `newBucket`. Entries are inhibited from the fresh bucket by keywise-equal
+    // entries in any of the buckets in the provided `shadows` vector.
+    //
+    // Each bucket is self-describing in terms of the ledger protocol version it
+    // was constructed under, and the merge algorithm adjusts to the maximum of
+    // the versions attached to each input or shadow bucket. The provided
+    // `maxProtocolVersion` bounds this (for error checking) and should usually
+    // be the protocol of the ledger header at which the merge is starting. An
+    // exception will be thrown if any provided bucket versions exceed it.
+    template <class BucketT>
+    static std::shared_ptr<BucketT>
+    merge(BucketManager& bucketManager, uint32_t maxProtocolVersion,
+          std::shared_ptr<BucketT> const& oldBucket,
+          std::shared_ptr<BucketT> const& newBucket,
+          std::vector<std::shared_ptr<BucketT>> const& shadows,
+          bool keepTombstoneEntries, bool countMergeEvents,
+          asio::io_context& ctx, bool doFsync);
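+
+    // A minimal usage sketch (illustrative only: `bm`, `ctx`, `header`, and
+    // the bucket shared_ptrs are assumed to exist in the caller):
+    //
+    //   auto merged = Bucket::merge<LiveBucket>(
+    //       bm, header.ledgerVersion, oldBucket, newBucket,
+    //       /*shadows=*/{}, /*keepTombstoneEntries=*/true,
+    //       /*countMergeEvents=*/true, ctx, /*doFsync=*/true);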
+
+    static std::string randomBucketName(std::string const& tmpDir);
+    static std::string randomBucketIndexName(std::string const& tmpDir);
+
+#ifdef BUILD_TESTS
+    BucketIndex const&
+    getIndexForTesting() const
+    {
+        return getIndex();
+    }
+
+#endif // BUILD_TESTS
+
+    virtual uint32_t getBucketVersion() const = 0;
+
+    template <class BucketT> friend class BucketSnapshotBase;
+};
+
+/*
+ * Live Buckets are used by the LiveBucketList to store the current canonical
+ * state of the ledger. They contain entries of type BucketEntry.
+ */
+class LiveBucket : public Bucket,
+                   public std::enable_shared_from_this<LiveBucket>
+{
+  public:
+    LiveBucket();
+    virtual ~LiveBucket()
+    {
+    }
+    LiveBucket(std::string const& filename, Hash const& hash,
+               std::unique_ptr<BucketIndex const>&& index);
+
+    // Returns true if a BucketEntry that is key-wise identical to the given
+    // BucketEntry exists in the bucket. For testing.
+    bool containsBucketIdentity(BucketEntry const& id) const;
+
     // At version 11, we added support for INITENTRY and METAENTRY. Before this
     // we were only supporting LIVEENTRY and DEADENTRY.
     static constexpr ProtocolVersion
@@ -113,79 +178,94 @@ class Bucket : public std::enable_shared_from_this<Bucket>,
                          std::vector<LedgerEntry> const& liveEntries,
                          std::vector<LedgerKey> const& deadEntries);
 
-    static std::string randomBucketName(std::string const& tmpDir);
-    static std::string randomBucketIndexName(std::string const& tmpDir);
-
 #ifdef BUILD_TESTS
     // "Applies" the bucket to the database. For each entry in the bucket,
     // if the entry is init or live, creates or updates the corresponding
     // entry in the database (respectively; if the entry is dead (a
     // tombstone), deletes the corresponding entry in the database.
     void apply(Application& app) const;
-
-    BucketIndex const&
-    getIndexForTesting() const
-    {
-        return getIndex();
-    }
-
-#endif // BUILD_TESTS
+#endif
 
     // Returns false if eof reached, true otherwise. Modifies iter as the bucket
     // is scanned. Also modifies bytesToScan and maxEntriesToEvict such that
     // after this function returns:
     // bytesToScan -= amount_bytes_scanned
     // maxEntriesToEvict -= entries_evicted
-    bool scanForEvictionLegacy(AbstractLedgerTxn& ltx, EvictionIterator& iter,
-                               uint32_t& bytesToScan,
-                               uint32_t& remainingEntriesToEvict,
-                               uint32_t ledgerSeq,
-                               medida::Counter& entriesEvictedCounter,
-                               medida::Counter& bytesScannedForEvictionCounter,
-                               std::shared_ptr<EvictionStatistics> stats) const;
-
     bool scanForEviction(EvictionIterator& iter, uint32_t& bytesToScan,
                          uint32_t ledgerSeq,
                          std::list<EvictionResultEntry>& evictableKeys,
-                         SearchableBucketListSnapshot& bl) const;
+                         SearchableBucketListSnapshot<LiveBucket>& bl) const;
 
     // Create a fresh bucket from given vectors of init (created) and live
     // (updated) LedgerEntries, and dead LedgerEntryKeys. The bucket will
     // be sorted, hashed, and adopted in the provided BucketManager.
-    static std::shared_ptr<Bucket>
+    static std::shared_ptr<LiveBucket>
     fresh(BucketManager& bucketManager, uint32_t protocolVersion,
           std::vector<LedgerEntry> const& initEntries,
           std::vector<LedgerEntry> const& liveEntries,
           std::vector<LedgerKey> const& deadEntries, bool countMergeEvents,
           asio::io_context& ctx, bool doFsync);
 
-    // Merge two buckets together, producing a fresh one. Entries in `oldBucket`
-    // are overridden in the fresh bucket by keywise-equal entries in
-    // `newBucket`. Entries are inhibited from the fresh bucket by keywise-equal
-    // entries in any of the buckets in the provided `shadows` vector.
-    //
-    // Each bucket is self-describing in terms of the ledger protocol version it
-    // was constructed under, and the merge algorithm adjusts to the maximum of
-    // the versions attached to each input or shadow bucket. The provided
-    // `maxProtocolVersion` bounds this (for error checking) and should usually
-    // be the protocol of the ledger header at which the merge is starting. An
-    // exception will be thrown if any provided bucket versions exceed it.
-    static std::shared_ptr<Bucket>
-    merge(BucketManager& bucketManager, uint32_t maxProtocolVersion,
-          std::shared_ptr<Bucket> const& oldBucket,
-          std::shared_ptr<Bucket> const& newBucket,
-          std::vector<std::shared_ptr<Bucket>> const& shadows,
-          bool keepDeadEntries, bool countMergeEvents, asio::io_context& ctx,
-          bool doFsync);
-
-    static uint32_t getBucketVersion(std::shared_ptr<Bucket> const& bucket);
-    static uint32_t
-    getBucketVersion(std::shared_ptr<Bucket const> const& bucket);
+    // Returns true if the given BucketEntry should be dropped in the bottom
+    // level bucket (i.e. DEADENTRY)
+    static bool isTombstoneEntry(BucketEntry const& e);
+
+    uint32_t getBucketVersion() const override;
+
     BucketEntryCounters const& getBucketEntryCounters() const;
-    friend class BucketSnapshot;
+
+    friend class LiveBucketSnapshot;
+};
+
+/*
+ * Hot Archive Buckets are used by the HotBucketList to store recently evicted
+ * entries. They contain entries of type HotArchiveBucketEntry.
+ */
+class HotArchiveBucket : public Bucket,
+                         public std::enable_shared_from_this<HotArchiveBucket>
+{
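+    // Maps the input vectors onto HotArchiveBucketEntry values:
+    // archivedEntries become HOT_ARCHIVE_ARCHIVED, restoredEntries become
+    // HOT_ARCHIVE_LIVE, and deletedEntries become HOT_ARCHIVE_DELETED. The
+    // result is sorted by key.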
+    static std::vector<HotArchiveBucketEntry>
+    convertToBucketEntry(std::vector<LedgerEntry> const& archivedEntries,
+                         std::vector<LedgerKey> const& restoredEntries,
+                         std::vector<LedgerKey> const& deletedEntries);
+
+  public:
+    HotArchiveBucket();
+    virtual ~HotArchiveBucket()
+    {
+    }
+    HotArchiveBucket(std::string const& filename, Hash const& hash,
+                     std::unique_ptr<BucketIndex const>&& index);
+    uint32_t getBucketVersion() const override;
+
+    static std::shared_ptr<HotArchiveBucket>
+    fresh(BucketManager& bucketManager, uint32_t protocolVersion,
+          std::vector<LedgerEntry> const& archivedEntries,
+          std::vector<LedgerKey> const& restoredEntries,
+          std::vector<LedgerKey> const& deletedEntries, bool countMergeEvents,
+          asio::io_context& ctx, bool doFsync);
+
+    // Returns true if the given BucketEntry should be dropped in the bottom
+    // level bucket (i.e. HOT_ARCHIVE_LIVE)
+    static bool isTombstoneEntry(HotArchiveBucketEntry const& e);
+
+    friend class HotArchiveBucketSnapshot;
+};
+
+class ColdArchiveBucket : public Bucket,
+                          public std::enable_shared_from_this<ColdArchiveBucket>
+{
+  public:
+    ColdArchiveBucket();
+    virtual ~ColdArchiveBucket()
+    {
+    }
+    ColdArchiveBucket(std::string const& filename, Hash const& hash,
+                      std::unique_ptr<BucketIndex const>&& index);
+
+    uint32_t getBucketVersion() const override;
 };
 
-enum class LedgerEntryTypeAndDurability : uint32_t;
 struct BucketEntryCounters
 {
     std::map<LedgerEntryTypeAndDurability, size_t> entryTypeCounts;
diff --git a/src/bucket/BucketApplicator.cpp b/src/bucket/BucketApplicator.cpp
index 7c739aa6f2..a9400c9d18 100644
--- a/src/bucket/BucketApplicator.cpp
+++ b/src/bucket/BucketApplicator.cpp
@@ -9,6 +9,7 @@
 #include "ledger/LedgerTxn.h"
 #include "ledger/LedgerTxnEntry.h"
 #include "main/Application.h"
+#include "util/GlobalChecks.h"
 #include "util/Logging.h"
 #include "util/types.h"
 #include <fmt/format.h>
@@ -20,15 +21,13 @@ BucketApplicator::BucketApplicator(Application& app,
                                    uint32_t maxProtocolVersion,
                                    uint32_t minProtocolVersionSeen,
                                    uint32_t level,
-                                   std::shared_ptr<Bucket const> bucket,
-                                   std::function<bool(LedgerEntryType)> filter,
+                                   std::shared_ptr<LiveBucket const> bucket,
                                    std::unordered_set<LedgerKey>& seenKeys)
     : mApp(app)
     , mMaxProtocolVersion(maxProtocolVersion)
     , mMinProtocolVersionSeen(minProtocolVersionSeen)
     , mLevel(level)
     , mBucketIter(bucket)
-    , mEntryTypeFilter(filter)
     , mSeenKeys(seenKeys)
 {
     auto protocolVersion = mBucketIter.getMetadata().ledgerVersion;
@@ -40,8 +39,8 @@ BucketApplicator::BucketApplicator(Application& app,
             protocolVersion, mMaxProtocolVersion));
     }
 
-    // Only apply offers if BucketListDB is enabled
-    if (mApp.getConfig().isUsingBucketListDB() && !bucket->isEmpty())
+    // Only offers are applied to the database; all other ledger state is
+    // served from BucketListDB.
+    if (!bucket->isEmpty())
     {
         auto offsetOp = bucket->getOfferRange();
         if (offsetOp)
@@ -62,10 +61,8 @@ BucketApplicator::operator bool() const
 {
     // There is more work to do (i.e. (bool) *this == true) iff:
     // 1. The underlying bucket iterator is not EOF and
-    // 2. Either BucketListDB is not enabled (so we must apply all entry types)
-    //    or BucketListDB is enabled and we have offers still remaining.
-    return static_cast<bool>(mBucketIter) &&
-           (!mApp.getConfig().isUsingBucketListDB() || mOffersRemaining);
+    // 2. We have offers still remaining.
+    return static_cast<bool>(mBucketIter) && mOffersRemaining;
 }
 
 size_t
@@ -81,12 +78,11 @@ BucketApplicator::size() const
 }
 
 static bool
-shouldApplyEntry(std::function<bool(LedgerEntryType)> const& filter,
-                 BucketEntry const& e)
+shouldApplyEntry(BucketEntry const& e)
 {
     if (e.type() == LIVEENTRY || e.type() == INITENTRY)
     {
-        return filter(e.liveEntry().data.type());
+        return BucketIndex::typeNotSupported(e.liveEntry().data.type());
     }
 
     if (e.type() != DEADENTRY)
@@ -94,7 +90,7 @@ shouldApplyEntry(std::function<bool(LedgerEntryType)> const& filter,
         throw std::runtime_error(
             "Malformed bucket: unexpected non-INIT/LIVE/DEAD entry.");
     }
-    return filter(e.deadEntry().type());
+    return BucketIndex::typeNotSupported(e.deadEntry().type());
 }
 
 size_t
@@ -110,11 +106,13 @@ BucketApplicator::advance(BucketApplicator::Counters& counters)
     // directly instead of creating a temporary inner LedgerTxn
     // as "advance" commits changes during each step this does not introduce any
     // new failure mode
+#ifdef BUILD_TESTS
     if (mApp.getConfig().MODE_USES_IN_MEMORY_LEDGER)
     {
         ltx = static_cast<AbstractLedgerTxn*>(&root);
     }
     else
+#endif
     {
         innerLtx = std::make_unique<LedgerTxn>(root, false);
         ltx = innerLtx.get();
@@ -127,99 +125,73 @@ BucketApplicator::advance(BucketApplicator::Counters& counters)
         // returns the file offset at the end of the currently loaded entry.
         // This means we must read until pos is strictly greater than the upper
         // bound so that we don't skip the last offer in the range.
-        auto isUsingBucketListDB = mApp.getConfig().isUsingBucketListDB();
-        if (isUsingBucketListDB && mBucketIter.pos() > mUpperBoundOffset)
+        if (mBucketIter.pos() > mUpperBoundOffset)
         {
             mOffersRemaining = false;
             break;
         }
 
         BucketEntry const& e = *mBucketIter;
-        Bucket::checkProtocolLegality(e, mMaxProtocolVersion);
+        LiveBucket::checkProtocolLegality(e, mMaxProtocolVersion);
 
-        if (shouldApplyEntry(mEntryTypeFilter, e))
+        if (shouldApplyEntry(e))
         {
-            if (isUsingBucketListDB)
+            if (e.type() == LIVEENTRY || e.type() == INITENTRY)
             {
-                if (e.type() == LIVEENTRY || e.type() == INITENTRY)
-                {
-                    auto [_, wasInserted] =
-                        mSeenKeys.emplace(LedgerEntryKey(e.liveEntry()));
+                auto [_, wasInserted] =
+                    mSeenKeys.emplace(LedgerEntryKey(e.liveEntry()));
 
-                    // Skip seen keys
-                    if (!wasInserted)
-                    {
-                        continue;
-                    }
-                }
-                else
+                // Skip seen keys
+                if (!wasInserted)
                 {
-                    // Only apply INIT and LIVE entries
-                    mSeenKeys.emplace(e.deadEntry());
                     continue;
                 }
             }
+            else
+            {
+                // Only apply INIT and LIVE entries
+                mSeenKeys.emplace(e.deadEntry());
+                continue;
+            }
 
             counters.mark(e);
 
-            if (e.type() == LIVEENTRY || e.type() == INITENTRY)
+            // DEAD and META entries skipped
+            releaseAssert(e.type() == LIVEENTRY || e.type() == INITENTRY);
+            // The last level can have live entries, but at that point we
+            // know that they are actually init entries because the earliest
+            // state of all entries is init, so we mark them as such here
+            if (mLevel == LiveBucketList::kNumLevels - 1 &&
+                e.type() == LIVEENTRY)
             {
-                // The last level can have live entries, but at that point we
-                // know that they are actually init entries because the earliest
-                // state of all entries is init, so we mark them as such here
-                if (mLevel == BucketList::kNumLevels - 1 &&
-                    e.type() == LIVEENTRY)
-                {
-                    ltx->createWithoutLoading(e.liveEntry());
-                }
-                else if (
-                    protocolVersionIsBefore(
-                        mMinProtocolVersionSeen,
-                        Bucket::
-                            FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY))
+                ltx->createWithoutLoading(e.liveEntry());
+            }
+            else if (protocolVersionIsBefore(
+                         mMinProtocolVersionSeen,
+                         LiveBucket::
+                             FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY))
+            {
+                // Prior to protocol 11, INITENTRY didn't exist, so we need
+                // to check ltx to see if this is an update or a create
+                auto key = InternalLedgerEntry(e.liveEntry()).toKey();
+                if (ltx->getNewestVersion(key))
                 {
-                    // Prior to protocol 11, INITENTRY didn't exist, so we need
-                    // to check ltx to see if this is an update or a create
-                    auto key = InternalLedgerEntry(e.liveEntry()).toKey();
-                    if (ltx->getNewestVersion(key))
-                    {
-                        ltx->updateWithoutLoading(e.liveEntry());
-                    }
-                    else
-                    {
-                        ltx->createWithoutLoading(e.liveEntry());
-                    }
+                    ltx->updateWithoutLoading(e.liveEntry());
                 }
                 else
                 {
-                    if (e.type() == LIVEENTRY)
-                    {
-                        ltx->updateWithoutLoading(e.liveEntry());
-                    }
-                    else
-                    {
-                        ltx->createWithoutLoading(e.liveEntry());
-                    }
+                    ltx->createWithoutLoading(e.liveEntry());
                 }
             }
             else
             {
-                releaseAssertOrThrow(!isUsingBucketListDB);
-                if (protocolVersionIsBefore(
-                        mMinProtocolVersionSeen,
-                        Bucket::
-                            FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY))
+                if (e.type() == LIVEENTRY)
                 {
-                    // Prior to protocol 11, DEAD entries could exist
-                    // without LIVE entries in between
-                    if (ltx->getNewestVersion(e.deadEntry()))
-                    {
-                        ltx->eraseWithoutLoading(e.deadEntry());
-                    }
+                    ltx->updateWithoutLoading(e.liveEntry());
                 }
                 else
                 {
-                    ltx->eraseWithoutLoading(e.deadEntry());
+                    ltx->createWithoutLoading(e.liveEntry());
                 }
             }
 
diff --git a/src/bucket/BucketApplicator.h b/src/bucket/BucketApplicator.h
index 88bc58ff6a..f96b6f4e44 100644
--- a/src/bucket/BucketApplicator.h
+++ b/src/bucket/BucketApplicator.h
@@ -24,9 +24,8 @@ class BucketApplicator
     uint32_t mMaxProtocolVersion;
     uint32_t mMinProtocolVersionSeen;
     uint32_t mLevel;
-    BucketInputIterator mBucketIter;
+    LiveBucketInputIterator mBucketIter;
     size_t mCount{0};
-    std::function<bool(LedgerEntryType)> mEntryTypeFilter;
     std::unordered_set<LedgerKey>& mSeenKeys;
     std::streamoff mUpperBoundOffset{0};
     bool mOffersRemaining{true};
@@ -72,8 +71,7 @@ class BucketApplicator
     // When this flag is set, each offer key read is added to seenKeys
     BucketApplicator(Application& app, uint32_t maxProtocolVersion,
                      uint32_t minProtocolVersionSeen, uint32_t level,
-                     std::shared_ptr<Bucket const> bucket,
-                     std::function<bool(LedgerEntryType)> filter,
+                     std::shared_ptr<LiveBucket const> bucket,
                      std::unordered_set<LedgerKey>& seenKeys);
     operator bool() const;
     size_t advance(Counters& counters);
diff --git a/src/bucket/BucketIndex.h b/src/bucket/BucketIndex.h
index 7dd34fc999..25604e28c0 100644
--- a/src/bucket/BucketIndex.h
+++ b/src/bucket/BucketIndex.h
@@ -84,6 +84,7 @@ class BucketIndex : public NonMovableOrCopyable
     // the largest buckets) and should only be called once. If pageSize == 0 or
     // if file size is less than the cutoff, individual key index is used.
     // Otherwise range index is used, with the range defined by pageSize.
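+    // BucketEntryT selects the XDR entry type stored in the bucket being
+    // indexed: BucketEntry, HotArchiveBucketEntry, or ColdArchiveBucketEntry.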
+    template <class BucketEntryT>
     static std::unique_ptr<BucketIndex const>
     createIndex(BucketManager& bm, std::filesystem::path const& filename,
                 Hash const& hash);
diff --git a/src/bucket/BucketIndexImpl.cpp b/src/bucket/BucketIndexImpl.cpp
index f4108d0fb3..8fc9248c93 100644
--- a/src/bucket/BucketIndexImpl.cpp
+++ b/src/bucket/BucketIndexImpl.cpp
@@ -25,6 +25,7 @@
 
 #include <memory>
 #include <thread>
+#include <type_traits>
 #include <xdrpp/marshal.h>
 
 namespace stellar
@@ -66,16 +67,28 @@ BucketIndex::typeNotSupported(LedgerEntryType t)
 }
 
 template <class IndexT>
+template <class BucketEntryT>
 BucketIndexImpl<IndexT>::BucketIndexImpl(BucketManager& bm,
                                          std::filesystem::path const& filename,
                                          std::streamoff pageSize,
-                                         Hash const& hash)
+                                         Hash const& hash,
+                                         BucketEntryT const& typeTag)
     : mBloomMissMeter(bm.getBloomMissMeter())
     , mBloomLookupMeter(bm.getBloomLookupMeter())
 {
+    static_assert(std::is_same_v<BucketEntryT, BucketEntry> ||
+                  std::is_same_v<BucketEntryT, HotArchiveBucketEntry> ||
+                  std::is_same_v<BucketEntryT, ColdArchiveBucketEntry>);
+
     ZoneScoped;
     releaseAssert(!filename.empty());
 
+    // TODO: Add support for cold archive
+    if constexpr (std::is_same_v<BucketEntryT, ColdArchiveBucketEntry>)
+    {
+        return;
+    }
+
     {
         auto timer = LogSlowExecution("Indexing bucket");
         mData.pageSize = pageSize;
@@ -94,7 +107,7 @@ BucketIndexImpl<IndexT>::BucketIndexImpl(BucketManager& bm,
         in.open(filename.string());
         std::streamoff pos = 0;
         std::streamoff pageUpperBound = 0;
-        BucketEntry be;
+        BucketEntryT be;
         size_t iter = 0;
         size_t count = 0;
 
@@ -126,35 +139,56 @@ BucketIndexImpl<IndexT>::BucketIndexImpl(BucketManager& bm,
                 }
             }
 
-            if (be.type() != METAENTRY)
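+            // Each bucket flavor has its own meta discriminant, so detect
+            // meta entries generically here.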
+            auto isMeta = [](auto const& be) {
+                if constexpr (std::is_same_v<BucketEntryT, BucketEntry>)
+                {
+                    return be.type() == METAENTRY;
+                }
+                else if constexpr (std::is_same_v<BucketEntryT,
+                                                  HotArchiveBucketEntry>)
+                {
+                    return be.type() == HOT_ARCHIVE_METAENTRY;
+                }
+                else
+                {
+                    return be.type() == COLD_ARCHIVE_METAENTRY;
+                }
+            };
+
+            if (!isMeta(be))
             {
                 ++count;
                 LedgerKey key = getBucketLedgerKey(be);
 
-                // We need an asset to poolID mapping for
-                // loadPoolshareTrustlineByAccountAndAsset queries. For this
-                // query, we only need to index INIT entries because:
-                // 1. PoolID is the hash of the Assets it refers to, so this
-                //    index cannot be invalidated by newer LIVEENTRY updates
-                // 2. We do a join over all bucket indexes so we avoid storing
-                //    multiple redundant index entries (i.e. LIVEENTRY updates)
-                // 3. We only use this index to collect the possible set of
-                //    Trustline keys, then we load those keys. This means that
-                //    we don't need to keep track of DEADENTRY. Even if a given
-                //    INITENTRY has been deleted by a newer DEADENTRY, the
-                //    trustline load will not return deleted trustlines, so the
-                //    load result is still correct even if the index has a few
-                //    deleted mappings.
-                if (be.type() == INITENTRY && key.type() == LIQUIDITY_POOL)
+                if constexpr (std::is_same_v<BucketEntryT, BucketEntry>)
                 {
-                    auto const& poolParams = be.liveEntry()
-                                                 .data.liquidityPool()
-                                                 .body.constantProduct()
-                                                 .params;
-                    mData.assetToPoolID[poolParams.assetA].emplace_back(
-                        key.liquidityPool().liquidityPoolID);
-                    mData.assetToPoolID[poolParams.assetB].emplace_back(
-                        key.liquidityPool().liquidityPoolID);
+                    // We need an asset to poolID mapping for
+                    // loadPoolshareTrustlineByAccountAndAsset queries. For this
+                    // query, we only need to index INIT entries because:
+                    // 1. PoolID is the hash of the Assets it refers to, so this
+                    //    index cannot be invalidated by newer LIVEENTRY updates
+                    // 2. We do a join over all bucket indexes so we avoid
+                    // storing
+                    //    multiple redundant index entries (i.e. LIVEENTRY
+                    //    updates)
+                    // 3. We only use this index to collect the possible set of
+                    //    Trustline keys, then we load those keys. This means
+                    //    that we don't need to keep track of DEADENTRY. Even if
+                    //    a given INITENTRY has been deleted by a newer
+                    //    DEADENTRY, the trustline load will not return deleted
+                    //    trustlines, so the load result is still correct even
+                    //    if the index has a few deleted mappings.
+                    if (be.type() == INITENTRY && key.type() == LIQUIDITY_POOL)
+                    {
+                        auto const& poolParams = be.liveEntry()
+                                                     .data.liquidityPool()
+                                                     .body.constantProduct()
+                                                     .params;
+                        mData.assetToPoolID[poolParams.assetA].emplace_back(
+                            key.liquidityPool().liquidityPoolID);
+                        mData.assetToPoolID[poolParams.assetB].emplace_back(
+                            key.liquidityPool().liquidityPoolID);
+                    }
                 }
 
                 if constexpr (std::is_same<IndexT, RangeIndex>::value)
@@ -182,7 +216,11 @@ BucketIndexImpl<IndexT>::BucketIndexImpl(BucketManager& bm,
                 {
                     mData.keysToOffset.emplace_back(key, pos);
                 }
-                countEntry(be);
+
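+                // Per-entry-type counters are currently only tracked for
+                // live bucket entries.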
+                if constexpr (std::is_same<BucketEntryT, BucketEntry>::value)
+                {
+                    countEntry(be);
+                }
             }
 
             pos = in.pos();
@@ -203,7 +241,7 @@ BucketIndexImpl<IndexT>::BucketIndexImpl(BucketManager& bm,
         ZoneValue(static_cast<int64_t>(count));
     }
 
-    if (bm.getConfig().isPersistingBucketListDBIndexes())
+    if (bm.getConfig().BUCKETLIST_DB_PERSIST_INDEX)
     {
         saveToDisk(bm, hash);
     }
@@ -224,7 +262,7 @@ BucketIndexImpl<BucketIndex::RangeIndex>::saveToDisk(BucketManager& bm,
                                                      Hash const& hash) const
 {
     ZoneScoped;
-    releaseAssert(bm.getConfig().isPersistingBucketListDBIndexes());
+    releaseAssert(bm.getConfig().BUCKETLIST_DB_PERSIST_INDEX);
     auto timer =
         LogSlowExecution("Saving index", LogSlowExecution::Mode::AUTOMATIC_RAII,
                          "took", std::chrono::milliseconds(100));
@@ -328,14 +366,18 @@ upper_bound_pred(LedgerKey const& key, IndexEntryT const& indexEntry)
     }
 }
 
+template <class BucketEntryT>
 std::unique_ptr<BucketIndex const>
 BucketIndex::createIndex(BucketManager& bm,
                          std::filesystem::path const& filename,
                          Hash const& hash)
 {
+    static_assert(std::is_same_v<BucketEntryT, BucketEntry> ||
+                  std::is_same_v<BucketEntryT, HotArchiveBucketEntry> ||
+                  std::is_same_v<BucketEntryT, ColdArchiveBucketEntry>);
+
     ZoneScoped;
     auto const& cfg = bm.getConfig();
-    releaseAssertOrThrow(cfg.isUsingBucketListDB());
     releaseAssertOrThrow(!filename.empty());
     auto pageSize = effectivePageSize(cfg, fs::size(filename.string()));
 
@@ -348,7 +390,8 @@ BucketIndex::createIndex(BucketManager& bm,
                        "bucket {}",
                        filename);
             return std::unique_ptr<BucketIndexImpl<IndividualIndex> const>(
-                new BucketIndexImpl<IndividualIndex>(bm, filename, 0, hash));
+                new BucketIndexImpl<IndividualIndex>(bm, filename, 0, hash,
+                                                     BucketEntryT{}));
         }
         else
         {
@@ -358,7 +401,8 @@ BucketIndex::createIndex(BucketManager& bm,
                        "{} in bucket {}",
                        pageSize, filename);
             return std::unique_ptr<BucketIndexImpl<RangeIndex> const>(
-                new BucketIndexImpl<RangeIndex>(bm, filename, pageSize, hash));
+                new BucketIndexImpl<RangeIndex>(bm, filename, pageSize, hash,
+                                                BucketEntryT{}));
         }
     }
     // BucketIndexImpl throws if BucketManager shuts down before index finishes,
@@ -605,4 +649,15 @@ BucketIndexImpl<IndexT>::getBucketEntryCounters() const
 {
     return mData.counters;
 }
+
+template std::unique_ptr<BucketIndex const>
+BucketIndex::createIndex<BucketEntry>(BucketManager& bm,
+                                      std::filesystem::path const& filename,
+                                      Hash const& hash);
+template std::unique_ptr<BucketIndex const>
+BucketIndex::createIndex<HotArchiveBucketEntry>(
+    BucketManager& bm, std::filesystem::path const& filename, Hash const& hash);
+template std::unique_ptr<BucketIndex const>
+BucketIndex::createIndex<ColdArchiveBucketEntry>(
+    BucketManager& bm, std::filesystem::path const& filename, Hash const& hash);
 }
diff --git a/src/bucket/BucketIndexImpl.h b/src/bucket/BucketIndexImpl.h
index d34155b055..f23ea9bdca 100644
--- a/src/bucket/BucketIndexImpl.h
+++ b/src/bucket/BucketIndexImpl.h
@@ -59,8 +59,13 @@ template <class IndexT> class BucketIndexImpl : public BucketIndex
     medida::Meter& mBloomMissMeter;
     medida::Meter& mBloomLookupMeter;
 
+    // Constructor templates cannot have their template arguments specified
+    // explicitly at the call site, so we pass a BucketEntryT value as a tag
+    // parameter to let the compiler deduce the type.
+    template <class BucketEntryT>
     BucketIndexImpl(BucketManager& bm, std::filesystem::path const& filename,
-                    std::streamoff pageSize, Hash const& hash);
+                    std::streamoff pageSize, Hash const& hash,
+                    BucketEntryT const& typeTag);
 
     template <class Archive>
     BucketIndexImpl(BucketManager const& bm, Archive& ar,
diff --git a/src/bucket/BucketInputIterator.cpp b/src/bucket/BucketInputIterator.cpp
index 7a3673b7f4..99f589a663 100644
--- a/src/bucket/BucketInputIterator.cpp
+++ b/src/bucket/BucketInputIterator.cpp
@@ -4,7 +4,9 @@
 
 #include "bucket/BucketInputIterator.h"
 #include "bucket/Bucket.h"
+#include "xdr/Stellar-ledger.h"
 #include <Tracy.hpp>
+#include <type_traits>
 
 namespace stellar
 {
@@ -12,14 +14,29 @@ namespace stellar
  * Helper class that reads from the file underlying a bucket, keeping the bucket
  * alive for the duration of its existence.
  */
+template <typename T>
 void
-BucketInputIterator::loadEntry()
+BucketInputIterator<T>::loadEntry()
 {
     ZoneScoped;
     if (mIn.readOne(mEntry))
     {
         mEntryPtr = &mEntry;
-        if (mEntry.type() == METAENTRY)
+        bool isMeta;
+        if constexpr (std::is_same_v<BucketEntryT, BucketEntry>)
+        {
+            isMeta = mEntry.type() == METAENTRY;
+        }
+        else if constexpr (std::is_same_v<BucketEntryT, HotArchiveBucketEntry>)
+        {
+            isMeta = mEntry.type() == HOT_ARCHIVE_METAENTRY;
+        }
+        else
+        {
+            isMeta = mEntry.type() == COLD_ARCHIVE_METAENTRY;
+        }
+
+        if (isMeta)
         {
             // There should only be one METAENTRY in the input stream
             // and it should be the first record.
@@ -34,6 +51,28 @@ BucketInputIterator::loadEntry()
                     "Malformed bucket: META after other entries.");
             }
             mMetadata = mEntry.metaEntry();
+
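+            // Buckets with the v1 metadata extension declare which bucket
+            // list they belong to; reject archive buckets whose declared
+            // type does not match this iterator's bucket type.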
+            if constexpr (std::is_same_v<T, HotArchiveBucketEntry>)
+            {
+                if (mMetadata.ext.v() != 1 ||
+                    mMetadata.ext.bucketListType() != HOT_ARCHIVE)
+                {
+                    throw std::runtime_error(
+                        "Malformed bucket: META entry with incorrect bucket "
+                        "list type.");
+                }
+            }
+            else if constexpr (std::is_same_v<T, ColdArchiveBucketEntry>)
+            {
+                if (mMetadata.ext.v() != 1 ||
+                    mMetadata.ext.bucketListType() != COLD_ARCHIVE)
+                {
+                    throw std::runtime_error(
+                        "Malformed bucket: META entry with incorrect bucket "
+                        "list type.");
+                }
+            }
+
             mSeenMetadata = true;
             loadEntry();
         }
@@ -42,7 +81,11 @@ BucketInputIterator::loadEntry()
             mSeenOtherEntries = true;
             if (mSeenMetadata)
             {
-                Bucket::checkProtocolLegality(mEntry, mMetadata.ledgerVersion);
+                if constexpr (std::is_same_v<T, LiveBucket>)
+                {
+                    LiveBucket::checkProtocolLegality(mEntry,
+                                                      mMetadata.ledgerVersion);
+                }
             }
         }
     }
@@ -52,42 +95,48 @@ BucketInputIterator::loadEntry()
     }
 }
 
+template <typename T>
 std::streamoff
-BucketInputIterator::pos()
+BucketInputIterator<T>::pos()
 {
     return mIn.pos();
 }
 
+template <typename T>
 size_t
-BucketInputIterator::size() const
+BucketInputIterator<T>::size() const
 {
     return mIn.size();
 }
 
-BucketInputIterator::operator bool() const
+template <typename T> BucketInputIterator<T>::operator bool() const
 {
     return mEntryPtr != nullptr;
 }
 
-BucketEntry const&
-BucketInputIterator::operator*()
+template <typename T>
+typename BucketInputIterator<T>::BucketEntryT const&
+BucketInputIterator<T>::operator*()
 {
     return *mEntryPtr;
 }
 
+template <typename T>
 bool
-BucketInputIterator::seenMetadata() const
+BucketInputIterator<T>::seenMetadata() const
 {
     return mSeenMetadata;
 }
 
+template <typename T>
 BucketMetadata const&
-BucketInputIterator::getMetadata() const
+BucketInputIterator<T>::getMetadata() const
 {
     return mMetadata;
 }
 
-BucketInputIterator::BucketInputIterator(std::shared_ptr<Bucket const> bucket)
+template <typename T>
+BucketInputIterator<T>::BucketInputIterator(std::shared_ptr<T const> bucket)
     : mBucket(bucket), mEntryPtr(nullptr), mSeenMetadata(false)
 {
     // In absence of metadata, we treat every bucket as though it is from ledger
@@ -106,13 +155,14 @@ BucketInputIterator::BucketInputIterator(std::shared_ptr<Bucket const> bucket)
     }
 }
 
-BucketInputIterator::~BucketInputIterator()
+template <typename T> BucketInputIterator<T>::~BucketInputIterator()
 {
     mIn.close();
 }
 
-BucketInputIterator&
-BucketInputIterator::operator++()
+template <typename T>
+BucketInputIterator<T>&
+BucketInputIterator<T>::operator++()
 {
     if (mIn)
     {
@@ -125,10 +175,15 @@ BucketInputIterator::operator++()
     return *this;
 }
 
+template <typename T>
 void
-BucketInputIterator::seek(std::streamoff offset)
+BucketInputIterator<T>::seek(std::streamoff offset)
 {
     mIn.seek(offset);
     loadEntry();
 }
+
+template class BucketInputIterator<LiveBucket>;
+template class BucketInputIterator<HotArchiveBucket>;
+template class BucketInputIterator<ColdArchiveBucket>;
 }
diff --git a/src/bucket/BucketInputIterator.h b/src/bucket/BucketInputIterator.h
index 02bdb2f3ea..e406721433 100644
--- a/src/bucket/BucketInputIterator.h
+++ b/src/bucket/BucketInputIterator.h
@@ -8,23 +8,35 @@
 #include "xdr/Stellar-ledger.h"
 
 #include <memory>
+#include <type_traits>
 
 namespace stellar
 {
 
 class Bucket;
+class LiveBucket;
+class HotArchiveBucket;
+class ColdArchiveBucket;
 
 // Helper class that reads through the entries in a bucket.
-class BucketInputIterator
+template <typename BucketT> class BucketInputIterator
 {
-    std::shared_ptr<Bucket const> mBucket;
+    static_assert(std::is_same_v<BucketT, LiveBucket> ||
+                  std::is_same_v<BucketT, HotArchiveBucket> ||
+                  std::is_same_v<BucketT, ColdArchiveBucket>);
+
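+    // Map each bucket type to the XDR entry type it stores: LiveBucket holds
+    // BucketEntry, HotArchiveBucket holds HotArchiveBucketEntry, and
+    // ColdArchiveBucket holds ColdArchiveBucketEntry.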
+    using BucketEntryT = std::conditional_t<
+        std::is_same_v<BucketT, LiveBucket>, BucketEntry,
+        std::conditional_t<std::is_same_v<BucketT, HotArchiveBucket>,
+                           HotArchiveBucketEntry, ColdArchiveBucketEntry>>;
+
+    std::shared_ptr<BucketT const> mBucket;
 
     // Validity and current-value of the iterator are funneled into a
     // pointer: if non-null, it points to mEntry.
-    BucketEntry const* mEntryPtr{nullptr};
+    BucketEntryT const* mEntryPtr{nullptr};
     XDRInputFileStream mIn;
-    BucketEntry mEntry;
+    BucketEntryT mEntry;
     bool mSeenMetadata{false};
     bool mSeenOtherEntries{false};
     BucketMetadata mMetadata;
@@ -43,9 +55,9 @@ class BucketInputIterator
     bool seenMetadata() const;
     BucketMetadata const& getMetadata() const;
 
-    BucketEntry const& operator*();
+    BucketEntryT const& operator*();
 
-    BucketInputIterator(std::shared_ptr<Bucket const> bucket);
+    BucketInputIterator(std::shared_ptr<BucketT const> bucket);
 
     ~BucketInputIterator();
 
@@ -55,4 +67,8 @@ class BucketInputIterator
     size_t size() const;
     void seek(std::streamoff offset);
 };
+
+typedef BucketInputIterator<LiveBucket> LiveBucketInputIterator;
+typedef BucketInputIterator<HotArchiveBucket> HotArchiveBucketInputIterator;
+typedef BucketInputIterator<ColdArchiveBucket> ColdArchiveBucketInputIterator;
 }
diff --git a/src/bucket/BucketList.cpp b/src/bucket/BucketList.cpp
index e714280a7e..419d168208 100644
--- a/src/bucket/BucketList.cpp
+++ b/src/bucket/BucketList.cpp
@@ -7,7 +7,9 @@
 #include "bucket/BucketIndexImpl.h"
 #include "bucket/BucketInputIterator.h"
 #include "bucket/BucketManager.h"
+#include "bucket/BucketOutputIterator.h"
 #include "bucket/BucketSnapshot.h"
+#include "bucket/LedgerCmp.h"
 #include "crypto/SHA.h"
 #include "ledger/LedgerManager.h"
 #include "ledger/LedgerTxn.h"
@@ -21,19 +23,30 @@
 
 #include <Tracy.hpp>
 #include <fmt/format.h>
+#include <future>
+#include <memory>
+#include <optional>
 
 namespace stellar
 {
 
-BucketLevel::BucketLevel(uint32_t i)
+template <> BucketListDepth BucketListBase<LiveBucket>::kNumLevels = 11;
+
+// TODO: This is an arbitrary number. Do some analysis and pick a better value
+// or make this a configurable network config.
+template <> BucketListDepth BucketListBase<HotArchiveBucket>::kNumLevels = 9;
+
+template <typename BucketT>
+BucketLevel<BucketT>::BucketLevel(uint32_t i)
     : mLevel(i)
-    , mCurr(std::make_shared<Bucket>())
-    , mSnap(std::make_shared<Bucket>())
+    , mCurr(std::make_shared<BucketT>())
+    , mSnap(std::make_shared<BucketT>())
 {
 }
 
+template <typename BucketT>
 uint256
-BucketLevel::getHash() const
+BucketLevel<BucketT>::getHash() const
 {
     SHA256 hsh;
     hsh.add(mCurr->getHash());
@@ -41,47 +54,59 @@ BucketLevel::getHash() const
     return hsh.finish();
 }
 
-FutureBucket const&
-BucketLevel::getNext() const
+template <typename BucketT>
+FutureBucket<BucketT> const&
+BucketLevel<BucketT>::getNext() const
 {
     return mNextCurr;
 }
 
-FutureBucket&
-BucketLevel::getNext()
+template <typename BucketT>
+FutureBucket<BucketT>&
+BucketLevel<BucketT>::getNext()
 {
     return mNextCurr;
 }
 
+template <typename BucketT>
 void
-BucketLevel::setNext(FutureBucket const& fb)
+BucketLevel<BucketT>::setNext(FutureBucket<BucketT> const& fb)
 {
     releaseAssert(threadIsMain());
     mNextCurr = fb;
 }
 
-std::shared_ptr<Bucket>
-BucketLevel::getCurr() const
+template <typename BucketT>
+std::shared_ptr<BucketT>
+BucketLevel<BucketT>::getCurr() const
 {
     return mCurr;
 }
 
-std::shared_ptr<Bucket>
-BucketLevel::getSnap() const
+template <typename BucketT>
+std::shared_ptr<BucketT>
+BucketLevel<BucketT>::getSnap() const
 {
     return mSnap;
 }
 
+template <typename BucketT>
 void
-BucketLevel::setCurr(std::shared_ptr<Bucket> b)
+BucketLevel<BucketT>::setCurr(std::shared_ptr<BucketT> b)
 {
     releaseAssert(threadIsMain());
     mNextCurr.clear();
     mCurr = b;
 }
 
+template <typename BucketT> BucketListBase<BucketT>::~BucketListBase()
+{
+}
+
+template <typename BucketT>
 bool
-BucketList::shouldMergeWithEmptyCurr(uint32_t ledger, uint32_t level)
+BucketListBase<BucketT>::shouldMergeWithEmptyCurr(uint32_t ledger,
+                                                  uint32_t level)
 {
 
     if (level != 0)
@@ -89,7 +114,7 @@ BucketList::shouldMergeWithEmptyCurr(uint32_t ledger, uint32_t level)
         // Round down the current ledger to when the merge was started, and
         // re-start the merge via prepare, mimicking the logic in `addBatch`
         auto mergeStartLedger =
-            roundDown(ledger, BucketList::levelHalf(level - 1));
+            roundDown(ledger, BucketListBase<BucketT>::levelHalf(level - 1));
 
         // Subtle: We're "preparing the next state" of this level's mCurr, which
         // is *either* mCurr merged with snap, or else just snap (if mCurr is
@@ -107,15 +132,17 @@ BucketList::shouldMergeWithEmptyCurr(uint32_t ledger, uint32_t level)
     return false;
 }
 
+template <typename BucketT>
 void
-BucketLevel::setSnap(std::shared_ptr<Bucket> b)
+BucketLevel<BucketT>::setSnap(std::shared_ptr<BucketT> b)
 {
     releaseAssert(threadIsMain());
     mSnap = b;
 }
 
+template <typename BucketT>
 void
-BucketLevel::commit()
+BucketLevel<BucketT>::commit()
 {
     if (mNextCurr.isLive())
     {
@@ -158,35 +185,51 @@ BucketLevel::commit()
 // ----------------------------------------------------------------------------------------
 // ...
 // clang-format on
+template <typename BucketT>
 void
-BucketLevel::prepare(Application& app, uint32_t currLedger,
-                     uint32_t currLedgerProtocol, std::shared_ptr<Bucket> snap,
-                     std::vector<std::shared_ptr<Bucket>> const& shadows,
-                     bool countMergeEvents)
+BucketLevel<BucketT>::prepare(
+    Application& app, uint32_t currLedger, uint32_t currLedgerProtocol,
+    std::shared_ptr<BucketT> snap,
+    std::vector<std::shared_ptr<BucketT>> const& shadows, bool countMergeEvents)
 {
     ZoneScoped;
     // If more than one absorb is pending at the same time, we have a logic
     // error in our caller (and all hell will break loose).
     releaseAssert(!mNextCurr.isMerging());
-    auto curr = BucketList::shouldMergeWithEmptyCurr(currLedger, mLevel)
-                    ? std::make_shared<Bucket>()
-                    : mCurr;
-
-    auto shadowsBasedOnProtocol =
-        protocolVersionStartsFrom(Bucket::getBucketVersion(snap),
-                                  Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED)
-            ? std::vector<std::shared_ptr<Bucket>>()
-            : shadows;
-    mNextCurr = FutureBucket(app, curr, snap, shadowsBasedOnProtocol,
-                             currLedgerProtocol, countMergeEvents, mLevel);
+    auto curr =
+        BucketListBase<BucketT>::shouldMergeWithEmptyCurr(currLedger, mLevel)
+            ? std::make_shared<BucketT>()
+            : mCurr;
+
+    if constexpr (std::is_same_v<BucketT, LiveBucket>)
+    {
+        auto shadowsBasedOnProtocol =
+            protocolVersionStartsFrom(
+                snap->getBucketVersion(),
+                LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)
+                ? std::vector<std::shared_ptr<LiveBucket>>()
+                : shadows;
+        mNextCurr =
+            FutureBucket<BucketT>(app, curr, snap, shadowsBasedOnProtocol,
+                                  currLedgerProtocol, countMergeEvents, mLevel);
+    }
+    else
+    {
+        // The HotArchive only exists for protocol > 21, so it should never
+        // have shadows.
+        mNextCurr =
+            FutureBucket<BucketT>(app, curr, snap, /*shadows=*/{},
+                                  currLedgerProtocol, countMergeEvents, mLevel);
+    }
+
     releaseAssert(mNextCurr.isMerging());
 }
 
-std::shared_ptr<Bucket>
-BucketLevel::snap()
+template <typename BucketT>
+std::shared_ptr<BucketT>
+BucketLevel<BucketT>::snap()
 {
     mSnap = mCurr;
-    mCurr = std::make_shared<Bucket>();
+    mCurr = std::make_shared<BucketT>();
     return mSnap;
 }
 
@@ -221,8 +264,9 @@ BucketListDepth::operator uint32_t() const
 // levelSize(8)  =  262144=0x040000
 // levelSize(9)  = 1048576=0x100000
 // levelSize(10) = 4194304=0x400000
+template <typename BucketT>
 uint32_t
-BucketList::levelSize(uint32_t level)
+BucketListBase<BucketT>::levelSize(uint32_t level)
 {
     releaseAssert(level < kNumLevels);
     return 1UL << (2 * (level + 1));
@@ -243,14 +287,16 @@ BucketList::levelSize(uint32_t level)
 // levelHalf(8)  =  131072=0x020000
 // levelHalf(9)  =  524288=0x080000
 // levelHalf(10) = 2097152=0x200000
+template <typename BucketT>
 uint32_t
-BucketList::levelHalf(uint32_t level)
+BucketListBase<BucketT>::levelHalf(uint32_t level)
 {
     return levelSize(level) >> 1;
 }
 
+template <typename BucketT>
 uint32_t
-BucketList::sizeOfCurr(uint32_t ledger, uint32_t level)
+BucketListBase<BucketT>::sizeOfCurr(uint32_t ledger, uint32_t level)
 {
     releaseAssert(ledger != 0);
     releaseAssert(level < kNumLevels);
@@ -261,7 +307,8 @@ BucketList::sizeOfCurr(uint32_t ledger, uint32_t level)
 
     auto const size = levelSize(level);
     auto const half = levelHalf(level);
-    if (level != BucketList::kNumLevels - 1 && roundDown(ledger, half) != 0)
+    if (level != BucketListBase<BucketT>::kNumLevels - 1 &&
+        roundDown(ledger, half) != 0)
     {
         uint32_t const sizeDelta = 1UL << (2 * level - 1);
         if (roundDown(ledger, half) == ledger ||
@@ -297,12 +344,13 @@ BucketList::sizeOfCurr(uint32_t ledger, uint32_t level)
     }
 }
 
+template <typename BucketT>
 uint32_t
-BucketList::sizeOfSnap(uint32_t ledger, uint32_t level)
+BucketListBase<BucketT>::sizeOfSnap(uint32_t ledger, uint32_t level)
 {
     releaseAssert(ledger != 0);
     releaseAssert(level < kNumLevels);
-    if (level == BucketList::kNumLevels - 1)
+    if (level == BucketListBase<BucketT>::kNumLevels - 1)
     {
         return 0;
     }
@@ -323,8 +371,9 @@ BucketList::sizeOfSnap(uint32_t ledger, uint32_t level)
     }
 }
 
+template <typename BucketT>
 uint32_t
-BucketList::oldestLedgerInCurr(uint32_t ledger, uint32_t level)
+BucketListBase<BucketT>::oldestLedgerInCurr(uint32_t ledger, uint32_t level)
 {
     releaseAssert(ledger != 0);
     releaseAssert(level < kNumLevels);
@@ -343,8 +392,9 @@ BucketList::oldestLedgerInCurr(uint32_t ledger, uint32_t level)
     return count + 1;
 }
 
+template <typename BucketT>
 uint32_t
-BucketList::oldestLedgerInSnap(uint32_t ledger, uint32_t level)
+BucketListBase<BucketT>::oldestLedgerInSnap(uint32_t ledger, uint32_t level)
 {
     releaseAssert(ledger != 0);
     releaseAssert(level < kNumLevels);
@@ -362,8 +412,9 @@ BucketList::oldestLedgerInSnap(uint32_t ledger, uint32_t level)
     return count + 1;
 }
 
+template <typename BucketT>
 uint256
-BucketList::getHash() const
+BucketListBase<BucketT>::getHash() const
 {
     ZoneScoped;
     SHA256 hsh;
@@ -393,8 +444,9 @@ BucketList::getHash() const
 //
 // clang-format on
 
+template <typename BucketT>
 bool
-BucketList::levelShouldSpill(uint32_t ledger, uint32_t level)
+BucketListBase<BucketT>::levelShouldSpill(uint32_t ledger, uint32_t level)
 {
     if (level == kNumLevels - 1)
     {
@@ -411,8 +463,9 @@ BucketList::levelShouldSpill(uint32_t ledger, uint32_t level)
 // spill frequency of the level below.
 // incoming_spill_frequency(i) = 2^(2i - 1) for i > 0
 // incoming_spill_frequency(0) = 1
+template <typename BucketT>
 uint32_t
-BucketList::bucketUpdatePeriod(uint32_t level, bool isCurr)
+BucketListBase<BucketT>::bucketUpdatePeriod(uint32_t level, bool isCurr)
 {
     if (!isCurr)
     {
@@ -429,26 +482,30 @@ BucketList::bucketUpdatePeriod(uint32_t level, bool isCurr)
     return 1u << (2 * level - 1);
 }
 
+template <typename BucketT>
 bool
-BucketList::keepDeadEntries(uint32_t level)
+BucketListBase<BucketT>::keepTombstoneEntries(uint32_t level)
 {
-    return level < BucketList::kNumLevels - 1;
+    return level < BucketListBase<BucketT>::kNumLevels - 1;
 }
 
-BucketLevel const&
-BucketList::getLevel(uint32_t i) const
+template <typename BucketT>
+BucketLevel<BucketT> const&
+BucketListBase<BucketT>::getLevel(uint32_t i) const
 {
     return mLevels.at(i);
 }
 
-BucketLevel&
-BucketList::getLevel(uint32_t i)
+template <typename BucketT>
+BucketLevel<BucketT>&
+BucketListBase<BucketT>::getLevel(uint32_t i)
 {
     return mLevels.at(i);
 }
 
+template <typename BucketT>
 void
-BucketList::resolveAnyReadyFutures()
+BucketListBase<BucketT>::resolveAnyReadyFutures()
 {
     ZoneScoped;
     for (auto& level : mLevels)
@@ -460,8 +517,9 @@ BucketList::resolveAnyReadyFutures()
     }
 }
 
+template <typename BucketT>
 bool
-BucketList::futuresAllResolved(uint32_t maxLevel) const
+BucketListBase<BucketT>::futuresAllResolved(uint32_t maxLevel) const
 {
     ZoneScoped;
     releaseAssert(maxLevel < mLevels.size());
@@ -476,8 +534,9 @@ BucketList::futuresAllResolved(uint32_t maxLevel) const
     return true;
 }
 
+template <typename BucketT>
 uint32_t
-BucketList::getMaxMergeLevel(uint32_t currLedger) const
+BucketListBase<BucketT>::getMaxMergeLevel(uint32_t currLedger) const
 {
     uint32_t i = 0;
     for (; i < static_cast<uint32_t>(mLevels.size()) - 1; ++i)
@@ -490,14 +549,15 @@ BucketList::getMaxMergeLevel(uint32_t currLedger) const
     return i;
 }
 
+template <typename BucketT>
 uint64_t
-BucketList::getSize() const
+BucketListBase<BucketT>::getSize() const
 {
     uint64_t sum = 0;
     for (auto const& lev : mLevels)
     {
-        std::array<std::shared_ptr<Bucket>, 2> buckets = {lev.getCurr(),
-                                                          lev.getSnap()};
+        std::array<std::shared_ptr<BucketT>, 2> buckets = {lev.getCurr(),
+                                                           lev.getSnap()};
         for (auto const& b : buckets)
         {
             if (b)
@@ -511,16 +571,93 @@ BucketList::getSize() const
 }
 
 void
-BucketList::addBatch(Application& app, uint32_t currLedger,
-                     uint32_t currLedgerProtocol,
-                     std::vector<LedgerEntry> const& initEntries,
-                     std::vector<LedgerEntry> const& liveEntries,
-                     std::vector<LedgerKey> const& deadEntries)
+HotArchiveBucketList::addBatch(Application& app, uint32_t currLedger,
+                               uint32_t currLedgerProtocol,
+                               std::vector<LedgerEntry> const& archiveEntries,
+                               std::vector<LedgerKey> const& restoredEntries,
+                               std::vector<LedgerKey> const& deletedEntries)
+{
+    ZoneScoped;
+    releaseAssert(currLedger > 0);
+    releaseAssert(mState == HOT_ARCHIVE);
+
+    for (uint32_t i = static_cast<uint32>(mLevels.size()) - 1; i != 0; --i)
+    {
+        if (levelShouldSpill(currLedger, i - 1))
+        {
+            /**
+             * At every ledger, level[0] prepares the new batch and commits
+             * it.
+             *
+             * At ledger multiples of 2, level[0] snaps, level[1] commits
+             * existing (promotes next to curr) and "prepares" by starting a
+             * merge of that new level[1] curr with the new level[0] snap. This
+             * is "level 0 spilling".
+             *
+             * At ledger multiples of 8, level[1] snaps, level[2] commits
+             * existing (promotes next to curr) and "prepares" by starting a
+             * merge of that new level[2] curr with the new level[1] snap. This
+             * is "level 1 spilling".
+             *
+             * At ledger multiples of 32, level[2] snaps, level[3] commits
+             * existing (promotes next to curr) and "prepares" by starting a
+             * merge of that new level[3] curr with the new level[2] snap. This
+             * is "level 2 spilling".
+             *
+             * All these have to be done in _reverse_ order (counting down
+             * levels) because we want a 'curr' to be pulled out of the way into
+             * a 'snap' the moment it's half-a-level full, not have anything
+             * else spilled/added to it.
+             */
+
+            auto snap = mLevels[i - 1].snap();
+            mLevels[i].commit();
+            mLevels[i].prepare(app, currLedger, currLedgerProtocol, snap,
+                               /*shadows=*/{},
+                               /*countMergeEvents=*/true);
+        }
+    }
+
+    // In some testing scenarios, we want to inhibit counting level 0 merges
+    // because they are not repeated when restarting merges on app startup,
+    // and we are checking for an expected number of merge events on restart.
+    bool countMergeEvents =
+        !app.getConfig().ARTIFICIALLY_REDUCE_MERGE_COUNTS_FOR_TESTING;
+    bool doFsync = !app.getConfig().DISABLE_XDR_FSYNC;
+    mLevels[0].prepare(
+        app, currLedger, currLedgerProtocol,
+        HotArchiveBucket::fresh(app.getBucketManager(), currLedgerProtocol,
+                                archiveEntries, restoredEntries, deletedEntries,
+                                countMergeEvents, app.getClock().getIOContext(),
+                                doFsync),
+        /*shadows=*/{}, countMergeEvents);
+    mLevels[0].commit();
+
+    // We almost always want to try to resolve completed merges to single
+    // buckets, as it makes restarts less fragile: fewer saved/restored shadows,
+    // fewer buckets for the user to accidentally delete from their buckets
+    // dir. Also makes publication less likely to redo a merge that was already
+    // complete (but not resolved) when the snapshot gets taken.
+    //
+    // But we support the option of not-doing so, only for the sake of
+    // testing. Note: this is nonblocking in any case.
+    if (!app.getConfig().ARTIFICIALLY_PESSIMIZE_MERGES_FOR_TESTING)
+    {
+        resolveAnyReadyFutures();
+    }
+}
+
+void
+LiveBucketList::addBatch(Application& app, uint32_t currLedger,
+                         uint32_t currLedgerProtocol,
+                         std::vector<LedgerEntry> const& initEntries,
+                         std::vector<LedgerEntry> const& liveEntries,
+                         std::vector<LedgerKey> const& deadEntries)
 {
     ZoneScoped;
     releaseAssert(currLedger > 0);
 
-    std::vector<std::shared_ptr<Bucket>> shadows;
+    std::vector<std::shared_ptr<LiveBucket>> shadows;
     for (auto& level : mLevels)
     {
         shadows.push_back(level.getCurr());
@@ -610,12 +747,13 @@ BucketList::addBatch(Application& app, uint32_t currLedger,
         !app.getConfig().ARTIFICIALLY_REDUCE_MERGE_COUNTS_FOR_TESTING;
     bool doFsync = !app.getConfig().DISABLE_XDR_FSYNC;
     releaseAssert(shadows.size() == 0);
-    mLevels[0].prepare(app, currLedger, currLedgerProtocol,
-                       Bucket::fresh(app.getBucketManager(), currLedgerProtocol,
-                                     initEntries, liveEntries, deadEntries,
-                                     countMergeEvents,
-                                     app.getClock().getIOContext(), doFsync),
-                       shadows, countMergeEvents);
+    mLevels[0].prepare(
+        app, currLedger, currLedgerProtocol,
+        LiveBucket::fresh(app.getBucketManager(), currLedgerProtocol,
+                          initEntries, liveEntries, deadEntries,
+                          countMergeEvents, app.getClock().getIOContext(),
+                          doFsync),
+        shadows, countMergeEvents);
     mLevels[0].commit();
 
     // We almost always want to try to resolve completed merges to single
@@ -633,7 +771,7 @@ BucketList::addBatch(Application& app, uint32_t currLedger,
 }
 
 BucketEntryCounters
-BucketList::sumBucketEntryCounters() const
+LiveBucketList::sumBucketEntryCounters() const
 {
     BucketEntryCounters counters;
     for (auto const& lev : mLevels)
@@ -651,9 +789,9 @@ BucketList::sumBucketEntryCounters() const
 }
 
 void
-BucketList::updateStartingEvictionIterator(EvictionIterator& iter,
-                                           uint32_t firstScanLevel,
-                                           uint32_t ledgerSeq)
+LiveBucketList::updateStartingEvictionIterator(EvictionIterator& iter,
+                                               uint32_t firstScanLevel,
+                                               uint32_t ledgerSeq)
 {
     // Check if an upgrade has changed the starting scan level to below the
     // current iterator level
@@ -676,8 +814,8 @@ BucketList::updateStartingEvictionIterator(EvictionIterator& iter,
     {
         // Check if bucket received an incoming spill
         releaseAssert(iter.bucketListLevel != 0);
-        if (BucketList::levelShouldSpill(ledgerSeq - 1,
-                                         iter.bucketListLevel - 1))
+        if (BucketListBase::levelShouldSpill(ledgerSeq - 1,
+                                             iter.bucketListLevel - 1))
         {
             // If Bucket changed, reset to start of bucket
             iter.bucketFileOffset = 0;
@@ -685,7 +823,8 @@ BucketList::updateStartingEvictionIterator(EvictionIterator& iter,
     }
     else
     {
-        if (BucketList::levelShouldSpill(ledgerSeq - 1, iter.bucketListLevel))
+        if (BucketListBase::levelShouldSpill(ledgerSeq - 1,
+                                             iter.bucketListLevel))
         {
             // If Bucket changed, reset to start of bucket
             iter.bucketFileOffset = 0;
@@ -694,7 +833,7 @@ BucketList::updateStartingEvictionIterator(EvictionIterator& iter,
 }
 
 bool
-BucketList::updateEvictionIterAndRecordStats(
+LiveBucketList::updateEvictionIterAndRecordStats(
     EvictionIterator& iter, EvictionIterator startIter,
     uint32_t configFirstScanLevel, uint32_t ledgerSeq,
     std::shared_ptr<EvictionStatistics> stats, EvictionCounters& counters)
@@ -737,10 +876,10 @@ BucketList::updateEvictionIterAndRecordStats(
 }
 
 void
-BucketList::checkIfEvictionScanIsStuck(EvictionIterator const& evictionIter,
-                                       uint32_t scanSize,
-                                       std::shared_ptr<Bucket const> b,
-                                       EvictionCounters& counters)
+LiveBucketList::checkIfEvictionScanIsStuck(EvictionIterator const& evictionIter,
+                                           uint32_t scanSize,
+                                           std::shared_ptr<LiveBucket const> b,
+                                           EvictionCounters& counters)
 {
     // Check to see if we can finish scanning the new bucket before it
     // receives an update
@@ -754,61 +893,11 @@ BucketList::checkIfEvictionScanIsStuck(EvictionIterator const& evictionIter,
     }
 }
 
-// To avoid noisy data, only count metrics that encompass a complete
-// eviction cycle. If a node joins the network mid cycle, metrics will be
-// nullopt and be initialized at the start of the next cycle.
-void
-BucketList::scanForEvictionLegacy(Application& app, AbstractLedgerTxn& ltx,
-                                  uint32_t ledgerSeq,
-                                  EvictionCounters& counters,
-                                  std::shared_ptr<EvictionStatistics> stats)
-{
-    releaseAssert(stats);
-
-    auto getBucketFromIter = [&levels = mLevels](EvictionIterator const& iter) {
-        auto& level = levels.at(iter.bucketListLevel);
-        return iter.isCurrBucket ? level.getCurr() : level.getSnap();
-    };
-
-    auto const& networkConfig =
-        app.getLedgerManager().getSorobanNetworkConfig();
-    auto const firstScanLevel =
-        networkConfig.stateArchivalSettings().startingEvictionScanLevel;
-    auto evictionIter = networkConfig.evictionIterator();
-    auto scanSize = networkConfig.stateArchivalSettings().evictionScanSize;
-    auto maxEntriesToEvict =
-        networkConfig.stateArchivalSettings().maxEntriesToArchive;
-
-    updateStartingEvictionIterator(evictionIter, firstScanLevel, ledgerSeq);
-
-    auto startIter = evictionIter;
-    auto b = getBucketFromIter(evictionIter);
-
-    while (!b->scanForEvictionLegacy(
-        ltx, evictionIter, scanSize, maxEntriesToEvict, ledgerSeq,
-        counters.entriesEvicted, counters.bytesScannedForEviction, stats))
-    {
-
-        if (updateEvictionIterAndRecordStats(evictionIter, startIter,
-                                             firstScanLevel, ledgerSeq, stats,
-                                             counters))
-        {
-            break;
-        }
-
-        b = getBucketFromIter(evictionIter);
-        checkIfEvictionScanIsStuck(
-            evictionIter,
-            networkConfig.stateArchivalSettings().evictionScanSize, b,
-            counters);
-    }
-
-    networkConfig.updateEvictionIterator(ltx, evictionIter);
-}
-
+template <typename BucketT>
 void
-BucketList::restartMerges(Application& app, uint32_t maxProtocolVersion,
-                          uint32_t ledger)
+BucketListBase<BucketT>::restartMerges(Application& app,
+                                       uint32_t maxProtocolVersion,
+                                       uint32_t ledger)
 {
     ZoneScoped;
     for (uint32_t i = 0; i < static_cast<uint32>(mLevels.size()); i++)
@@ -856,9 +945,9 @@ BucketList::restartMerges(Application& app, uint32_t maxProtocolVersion,
                 return;
             }
 
-            auto version = Bucket::getBucketVersion(snap);
-            if (protocolVersionIsBefore(version,
-                                        Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
+            auto version = snap->getBucketVersion();
+            if (protocolVersionIsBefore(
+                    version, LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
             {
                 auto msg = fmt::format(
                     FMT_STRING("Invalid state: bucketlist level {:d} has clear "
@@ -870,7 +959,7 @@ BucketList::restartMerges(Application& app, uint32_t maxProtocolVersion,
             // Round down the current ledger to when the merge was started, and
             // re-start the merge via prepare, mimicking the logic in `addBatch`
             auto mergeStartLedger =
-                roundDown(ledger, BucketList::levelHalf(i - 1));
+                roundDown(ledger, BucketListBase::levelHalf(i - 1));
             level.prepare(
                 app, mergeStartLedger, version, snap, /* shadows= */ {},
                 !app.getConfig().ARTIFICIALLY_REDUCE_MERGE_COUNTS_FOR_TESTING);
@@ -878,13 +967,224 @@ BucketList::restartMerges(Application& app, uint32_t maxProtocolVersion,
     }
 }
 
-BucketListDepth BucketList::kNumLevels = 11;
+void
+HotArchiveBucketList::startColdArchiveMerge(Application& app, uint32_t epoch,
+                                            uint32_t protocolVersion)
+{
+    releaseAssert(mState == HOT_ARCHIVE);
+    mState = PENDING_COLD_ARCHIVE;
+    mPendingColdArchive = std::make_unique<PendingColdArchive>(
+        app, *this, epoch, protocolVersion);
+}
+
+std::shared_ptr<ColdArchiveBucket>
+HotArchiveBucketList::resolveColdArchiveMerge()
+{
+    releaseAssert(mState == PENDING_COLD_ARCHIVE);
+    releaseAssert(mPendingColdArchive);
+    return mPendingColdArchive->resolve();
+}
+
+PendingColdArchive::PendingColdArchive(Application& app,
+                                       HotArchiveBucketList const& bl,
+                                       uint32_t epoch, uint32_t protocolVersion)
+    : mEpoch(epoch)
+{
+    // Input Iterators for Hot Archive being merged, in order (level 0 curr,
+    // level 0 snap, level 1 curr, ...)
+    std::vector<std::unique_ptr<HotArchiveBucketInputIterator>> inputBuckets;
+    for (uint32_t i = 0; i < BucketListBase<HotArchiveBucket>::kNumLevels;
+         ++i)
+    {
+        auto const& level = bl.getLevel(i);
+        inputBuckets.emplace_back(
+            std::make_unique<HotArchiveBucketInputIterator>(level.getCurr()));
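+        // The bottom level never spills, so it has no snap to merge.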
+        if (i != BucketListBase<HotArchiveBucket>::kNumLevels - 1)
+        {
+            inputBuckets.emplace_back(
+                std::make_unique<HotArchiveBucketInputIterator>(
+                    level.getSnap()));
+        }
+    }
+
+    asio::io_context& ctx = app.getWorkerIOContext();
+    bool doFsync = !app.getConfig().DISABLE_XDR_FSYNC;
+    auto& bm = app.getBucketManager();
+
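+    // Kick off the merge on a background worker thread; resolve() later
+    // blocks on mMergeFuture to obtain the merged ColdArchiveBucket.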
+    using task_t = std::packaged_task<std::shared_ptr<ColdArchiveBucket>()>;
+    std::shared_ptr<task_t> task = std::make_shared<task_t>(
+        [this, &bm, protocolVersion, &ctx, doFsync,
+         inputs = std::move(inputBuckets), epoch]() mutable {
+            // First check if the merge result already exists
+            auto b = bm.getPendingColdArchiveBucketByEpoch(epoch);
+
+            // Merge result file already exists
+            if (!b->isEmpty())
+            {
+                if (!b->isIndexed())
+                {
+                    auto indexFilename = bm.bucketIndexFilename(b->getHash());
+                    std::unique_ptr<BucketIndex const> index;
+                    if (bm.getConfig().BUCKETLIST_DB_PERSIST_INDEX &&
+                        fs::exists(indexFilename))
+                    {
+                        index =
+                            BucketIndex::load(bm, indexFilename, b->getSize());
+                    }
+                    else
+                    {
+                        index =
+                            BucketIndex::createIndex<ColdArchiveBucketEntry>(
+                                bm, b->getFilename(), b->getHash());
+                    }
+
+                    b->setIndex(std::move(index));
+                }
+                return b;
+            }
+            return merge(bm, protocolVersion, ctx, doFsync, std::move(inputs));
+        });
+
+    mMergeFuture = task->get_future();
+    app.postOnBackgroundThread(bind(&task_t::operator(), task),
+                               "PendingColdArchive: merge");
+}
+
+std::shared_ptr<ColdArchiveBucket>
+PendingColdArchive::resolve()
+{
+    return mMergeFuture.get();
+}
+
+std::shared_ptr<ColdArchiveBucket>
+PendingColdArchive::merge(
+    BucketManager& bucketManager, uint32_t protocolVersion,
+    asio::io_context& ctx, bool doFsync,
+    std::vector<std::unique_ptr<HotArchiveBucketInputIterator>>&& inputBuckets)
+{
+    ZoneScoped;
+    // Advance the input iterators to the next entry greater than bound, or
+    // until we reach the end of the iterator.
+    auto advanceIters = [&](LedgerKey const& bound) {
+        for (auto& iterPtr : inputBuckets)
+        {
+            auto& iter = *iterPtr;
+
+            // *iter <= bound
+            while (iter &&
+                   (LedgerEntryIdCmp{}(getBucketLedgerKey(*iter), bound) ||
+                    getBucketLedgerKey(*iter) == bound))
+            {
+                ++iter;
+            }
+        }
+    };
+
+    auto workRemaining = [&]() {
+        for (auto const& iterPtr : inputBuckets)
+        {
+            if (*iterPtr)
+            {
+                return true;
+            }
+        }
+
+        return false;
+    };
+
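+    // Standard k-way merge: repeatedly emit the entry with the smallest key
+    // across all input iterators, then advance every iterator past that key
+    // so only the newest version of each entry is written.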
+    BucketMetadata meta;
+    meta.ledgerVersion = protocolVersion;
+
+    MergeCounters mc;
+    ColdArchiveBucketOutputIterator out(bucketManager.getTmpDir(), false, meta,
+                                        mc, ctx, doFsync, mEpoch);
+
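+    // bucketIndex assigns each emitted leaf (boundary, archived, or deleted)
+    // a dense, monotonically increasing position within the bucket.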
+    uint32_t bucketIndex = 0;
 
-BucketList::BucketList()
+    // First write a dummy lower-bound boundary leaf; boundary leaves bracket
+    // the bucket's entry range.
+    ColdArchiveBucketEntry lowBoundEntry;
+    lowBoundEntry.type(COLD_ARCHIVE_BOUNDARY_LEAF);
+    lowBoundEntry.boundaryLeaf().isLowerBound = true;
+    lowBoundEntry.boundaryLeaf().index = bucketIndex;
+    out.put(lowBoundEntry);
+    ++bucketIndex;
+
+    while (workRemaining())
+    {
+        // Find the smallest entry among all the input iterators to write. Tie
+        // break goes to the most recent level, so we iterate top down and
+        // compare with < operator.
+        std::optional<LedgerKey> currSmallest{};
+        size_t currSmallestIndex = 0;
+        for (size_t i = 0; i < inputBuckets.size(); ++i)
+        {
+            auto& iter = *inputBuckets.at(i);
+            if (iter)
+            {
+                auto key = getBucketLedgerKey(*iter);
+                if (!currSmallest || LedgerEntryIdCmp{}(key, *currSmallest))
+                {
+                    currSmallest = key;
+                    currSmallestIndex = i;
+                }
+            }
+        }
+
+        releaseAssert(currSmallest);
+
+        ColdArchiveBucketEntry coldEntry;
+        auto entryToWrite = **inputBuckets.at(currSmallestIndex);
+        switch (entryToWrite.type())
+        {
+        // Live entries do not need to be written to the cold archive, so
+        // advance iterators but continue without writing
+        case HOT_ARCHIVE_LIVE:
+            advanceIters(*currSmallest);
+            continue;
+        case HOT_ARCHIVE_ARCHIVED:
+            coldEntry.type(COLD_ARCHIVE_ARCHIVED_LEAF);
+            coldEntry.archivedLeaf().archivedEntry =
+                entryToWrite.archivedEntry();
+            coldEntry.archivedLeaf().index = bucketIndex;
+            break;
+        case HOT_ARCHIVE_DELETED:
+            coldEntry.type(COLD_ARCHIVE_DELETED_LEAF);
+            coldEntry.deletedLeaf().deletedKey = entryToWrite.key();
+            coldEntry.deletedLeaf().index = bucketIndex;
+            break;
+        case HOT_ARCHIVE_METAENTRY:
+            throw std::runtime_error("Unexpected meta entry in HotArchive");
+        }
+
+        out.put(coldEntry);
+        ++bucketIndex;
+        advanceIters(*currSmallest);
+    }
+
+    // Now write the matching upper-bound boundary leaf.
+    ColdArchiveBucketEntry upperBoundEntry;
+    upperBoundEntry.type(COLD_ARCHIVE_BOUNDARY_LEAF);
+    upperBoundEntry.boundaryLeaf().isLowerBound = false;
+    upperBoundEntry.boundaryLeaf().index = bucketIndex;
+    out.put(upperBoundEntry);
+
+    return out.getBucket(bucketManager);
+}
+
+template <typename BucketT> BucketListBase<BucketT>::BucketListBase()
 {
     for (uint32_t i = 0; i < kNumLevels; ++i)
     {
-        mLevels.push_back(BucketLevel(i));
+        mLevels.push_back(BucketLevel<BucketT>(i));
     }
 }
+
+template class BucketListBase<LiveBucket>;
+template class BucketListBase<HotArchiveBucket>;
+template class BucketLevel<LiveBucket>;
+template class BucketLevel<HotArchiveBucket>;
 }
diff --git a/src/bucket/BucketList.h b/src/bucket/BucketList.h
index 09549ac1ad..0e1a09dfea 100644
--- a/src/bucket/BucketList.h
+++ b/src/bucket/BucketList.h
@@ -5,7 +5,9 @@
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
 
 #include "bucket/Bucket.h"
+#include "bucket/BucketInputIterator.h"
 #include "bucket/FutureBucket.h"
+#include <memory>
 
 namespace medida
 {
@@ -352,36 +354,39 @@ struct InflationWinner;
 
 namespace testutil
 {
-class BucketListDepthModifier;
+template <class BucketT> class BucketListDepthModifier;
 }
 
-class BucketLevel
+template <class BucketT> class BucketLevel
 {
+    static_assert(std::is_same_v<BucketT, LiveBucket> ||
+                  std::is_same_v<BucketT, HotArchiveBucket>);
+
     uint32_t mLevel;
-    FutureBucket mNextCurr;
-    std::shared_ptr<Bucket> mCurr;
-    std::shared_ptr<Bucket> mSnap;
+    FutureBucket<BucketT> mNextCurr;
+    std::shared_ptr<BucketT> mCurr;
+    std::shared_ptr<BucketT> mSnap;
 
   public:
     BucketLevel(uint32_t i);
     uint256 getHash() const;
-    FutureBucket const& getNext() const;
-    FutureBucket& getNext();
-    std::shared_ptr<Bucket> getCurr() const;
-    std::shared_ptr<Bucket> getSnap() const;
-    void setNext(FutureBucket const& fb);
-    void setCurr(std::shared_ptr<Bucket>);
-    void setSnap(std::shared_ptr<Bucket>);
+    FutureBucket<BucketT> const& getNext() const;
+    FutureBucket<BucketT>& getNext();
+    std::shared_ptr<BucketT> getCurr() const;
+    std::shared_ptr<BucketT> getSnap() const;
+    void setNext(FutureBucket<BucketT> const& fb);
+    void setCurr(std::shared_ptr<BucketT>);
+    void setSnap(std::shared_ptr<BucketT>);
     void commit();
     void prepare(Application& app, uint32_t currLedger,
-                 uint32_t currLedgerProtocol, std::shared_ptr<Bucket> snap,
-                 std::vector<std::shared_ptr<Bucket>> const& shadows,
+                 uint32_t currLedgerProtocol, std::shared_ptr<BucketT> snap,
+                 std::vector<std::shared_ptr<BucketT>> const& shadows,
                  bool countMergeEvents);
-    std::shared_ptr<Bucket> snap();
+    std::shared_ptr<BucketT> snap();
 };
 
 // NOTE: The access specifications for this class have been carefully chosen to
-//       make it so BucketList::kNumLevels can only be modified from
+//       make it so LiveBucketList::kNumLevels can only be modified from
 //       BucketListDepthModifier -- not even BucketList can modify it. Please
 //       use care when modifying this class.
 class BucketListDepth
@@ -395,14 +400,27 @@ class BucketListDepth
 
     operator uint32_t() const;
 
-    friend class testutil::BucketListDepthModifier;
+    template <class BucketT> friend class testutil::BucketListDepthModifier;
 };
 
-class BucketList
+// While every BucketList shares the same high-level structure with respect to
+// spill schedules, merges at the bucket level, etc., each BucketList type
+// holds different types of entries and has different merge logic at the
+// individual entry level. This abstract base class defines the shared
+// structure of all BucketLists. It must be extended for each specific
+// BucketList type, where the template parameter BucketT refers to the
+// underlying Bucket type.
+template <class BucketT> class BucketListBase
 {
-    std::vector<BucketLevel> mLevels;
+    static_assert(std::is_same_v<BucketT, LiveBucket> ||
+                  std::is_same_v<BucketT, HotArchiveBucket>);
+
+  protected:
+    std::vector<BucketLevel<BucketT>> mLevels;
 
   public:
+    // Trivial pure virtual destructor to make this an abstract class
+    virtual ~BucketListBase() = 0;
+
     // Number of bucket levels in the bucketlist. Every bucketlist in the system
     // will have this many levels and it effectively gets wired-in to the
     // protocol. Careful about changing it.
@@ -436,50 +454,30 @@ class BucketList
     // should spill curr->snap and start merging snap into its next level.
     static bool levelShouldSpill(uint32_t ledger, uint32_t level);
 
-    // Returns true if at given `level` dead entries should be kept.
-    static bool keepDeadEntries(uint32_t level);
+    // Returns true if at given `level` tombstone entries should be kept. A
+    // "tombstone" entry is the entry type that represents null in the given
+    // BucketList. For LiveBucketList, this is DEADENTRY. For
+    // HotArchiveBucketList, HOT_ARCHIVE_LIVE.
+    static bool keepTombstoneEntries(uint32_t level);
 
     // Number of ledgers it takes a bucket to spill/receive an incoming spill
     static uint32_t bucketUpdatePeriod(uint32_t level, bool isCurr);
 
     // Create a new BucketList with every `kNumLevels` levels, each with
     // an empty bucket in `curr` and `snap`.
-    BucketList();
+    BucketListBase();
 
     // Return level `i` of the BucketList.
-    BucketLevel const& getLevel(uint32_t i) const;
+    BucketLevel<BucketT> const& getLevel(uint32_t i) const;
 
     // Return level `i` of the BucketList.
-    BucketLevel& getLevel(uint32_t i);
+    BucketLevel<BucketT>& getLevel(uint32_t i);
 
     // Return a cumulative hash of the entire bucketlist; this is the hash of
     // the concatenation of each level's hash, each of which in turn is the hash
     // of the concatenation of the hashes of the `curr` and `snap` buckets.
     Hash getHash() const;
 
-    // Reset Eviction Iterator position if an incoming spill or upgrade has
-    // invalidated the previous position
-    static void updateStartingEvictionIterator(EvictionIterator& iter,
-                                               uint32_t firstScanLevel,
-                                               uint32_t ledgerSeq);
-
-    // Update eviction iter and record stats after scanning a region in one
-    // bucket. Returns true if scan has looped back to startIter, false
-    // otherwise.
-    static bool updateEvictionIterAndRecordStats(
-        EvictionIterator& iter, EvictionIterator startIter,
-        uint32_t configFirstScanLevel, uint32_t ledgerSeq,
-        std::shared_ptr<EvictionStatistics> stats, EvictionCounters& counters);
-
-    static void checkIfEvictionScanIsStuck(EvictionIterator const& evictionIter,
-                                           uint32_t scanSize,
-                                           std::shared_ptr<Bucket const> b,
-                                           EvictionCounters& counters);
-
-    void scanForEvictionLegacy(Application& app, AbstractLedgerTxn& ltx,
-                               uint32_t ledgerSeq, EvictionCounters& counters,
-                               std::shared_ptr<EvictionStatistics> stats);
-
     // Restart any merges that might be running on background worker threads,
     // merging buckets between levels. This needs to be called after forcing a
     // BucketList to adopt a new state, either at application restart or when
@@ -511,6 +509,34 @@ class BucketList
     // Returns the total size of the BucketList, in bytes, excluding all
     // FutureBuckets
     uint64_t getSize() const;
+};
+
+// The LiveBucketList stores the current canonical state of the ledger. It is
+// made up of LiveBucket buckets, which in turn store individual entries of type
+// BucketEntry. When an entry is "evicted" from the ledger, it is removed from
+// the LiveBucketList. Depending on the evicted entry type, it may then be added
+// to the HotArchiveBucketList.
+class LiveBucketList : public BucketListBase<LiveBucket>
+{
+  public:
+    // Reset Eviction Iterator position if an incoming spill or upgrade has
+    // invalidated the previous position
+    static void updateStartingEvictionIterator(EvictionIterator& iter,
+                                               uint32_t firstScanLevel,
+                                               uint32_t ledgerSeq);
+
+    // Update eviction iter and record stats after scanning a region in one
+    // bucket. Returns true if scan has looped back to startIter, false
+    // otherwise.
+    static bool updateEvictionIterAndRecordStats(
+        EvictionIterator& iter, EvictionIterator startIter,
+        uint32_t configFirstScanLevel, uint32_t ledgerSeq,
+        std::shared_ptr<EvictionStatistics> stats, EvictionCounters& counters);
+
+    static void checkIfEvictionScanIsStuck(EvictionIterator const& evictionIter,
+                                           uint32_t scanSize,
+                                           std::shared_ptr<LiveBucket const> b,
+                                           EvictionCounters& counters);
 
     // Add a batch of initial (created), live (updated) and dead entries to the
     // bucketlist, representing the entries effected by closing
@@ -524,6 +550,62 @@ class BucketList
                   std::vector<LedgerEntry> const& initEntries,
                   std::vector<LedgerEntry> const& liveEntries,
                   std::vector<LedgerKey> const& deadEntries);
+
     BucketEntryCounters sumBucketEntryCounters() const;
 };
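+
+// Illustrative sketch (hypothetical call site; the actual wiring lives in the
+// ledger-close path, not in this file): the entries changed by a closed
+// ledger are fed into the live list roughly as
+//
+//   liveList.addBatch(app, header.ledgerSeq, header.ledgerVersion,
+//                     initEntries, liveEntries, deadEntries);
+//
+// with created entries in initEntries, updated ones in liveEntries, and
+// deleted keys in deadEntries.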
+
+class HotArchiveBucketList;
+
+// Once the current epoch's HotArchiveBucketList is full, it is merged into
+// a single ColdArchiveBucket. We then initialize a ColdArchiveBucketList using
+// this bucket and slowly build up a Merkle root for the cold archive. This is a
+// lightweight class that is used to manage merging a HotArchiveBucketList into
+// a single ColdArchiveBucket. The merge runs in the background, but unlike
+// FutureBucket merges, PendingColdArchive merges cannot be resumed across
+// restarts: if a node restarts before the merge completes, the merge must be
+// restarted from scratch.
+class PendingColdArchive
+{
+    uint32_t const mEpoch;
+    std::future<std::shared_ptr<ColdArchiveBucket>> mMergeFuture;
+
+    std::shared_ptr<ColdArchiveBucket>
+    merge(BucketManager& bucketManager, uint32_t protocolVersion,
+          asio::io_context& ctx, bool doFsync,
+          std::vector<std::unique_ptr<HotArchiveBucketInputIterator>>&&
+              inputBuckets);
+
+  public:
+    PendingColdArchive(Application& app, HotArchiveBucketList const& bl,
+                       uint32_t epoch, uint32_t protocolVersion);
+
+    std::shared_ptr<ColdArchiveBucket> resolve();
+};
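+
+// Illustrative usage sketch (hypothetical; call sites are outside this
+// patch): a full HotArchiveBucketList is handed to a PendingColdArchive,
+// which starts the merge in the background; resolve() later blocks on
+// mMergeFuture until the merged ColdArchiveBucket is ready:
+//
+//   PendingColdArchive pending(app, hotArchiveList, epoch, protocolVersion);
+//   // ... some ledgers later ...
+//   std::shared_ptr<ColdArchiveBucket> merged = pending.resolve();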
+
+// The HotArchiveBucketList stores recently evicted entries. It contains Buckets
+// of type HotArchiveBucket, which store individual entries of type
+// HotArchiveBucketEntry.
+class HotArchiveBucketList : public BucketListBase<HotArchiveBucket>
+{
+  private:
+    enum State
+    {
+        HOT_ARCHIVE,
+        PENDING_COLD_ARCHIVE,
+    };
+
+    State mState{HOT_ARCHIVE};
+    std::unique_ptr<PendingColdArchive> mPendingColdArchive{};
+
+  public:
+    void addBatch(Application& app, uint32_t currLedger,
+                  uint32_t currLedgerProtocol,
+                  std::vector<LedgerEntry> const& archiveEntries,
+                  std::vector<LedgerKey> const& restoredEntries,
+                  std::vector<LedgerKey> const& deletedEntries);
+
+    void startColdArchiveMerge(Application& app, HotArchiveBucketList const& bl,
+                               uint32_t epoch, uint32_t protocolVersion);
+    std::shared_ptr<ColdArchiveBucket> resolveColdArchiveMerge();
+};
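+
+// Illustrative note (an assumption; the implementations are not shown in
+// this file): startColdArchiveMerge would flip mState from HOT_ARCHIVE to
+// PENDING_COLD_ARCHIVE and populate mPendingColdArchive, while
+// resolveColdArchiveMerge would forward to PendingColdArchive::resolve().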
 }
diff --git a/src/bucket/BucketListSnapshot.cpp b/src/bucket/BucketListSnapshot.cpp
index 5d26fd8296..b071567b09 100644
--- a/src/bucket/BucketListSnapshot.cpp
+++ b/src/bucket/BucketListSnapshot.cpp
@@ -3,59 +3,74 @@
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
 
 #include "bucket/BucketListSnapshot.h"
+#include "bucket/Bucket.h"
 #include "bucket/BucketInputIterator.h"
+#include "bucket/BucketList.h"
+#include "bucket/BucketSnapshot.h"
 #include "crypto/SecretKey.h" // IWYU pragma: keep
 #include "ledger/LedgerTxn.h"
 
 #include "medida/timer.h"
 #include "util/GlobalChecks.h"
+#include <optional>
+#include <vector>
 
 namespace stellar
 {
-
-BucketListSnapshot::BucketListSnapshot(BucketList const& bl,
-                                       LedgerHeader header)
+template <class BucketT>
+BucketListSnapshot<BucketT>::BucketListSnapshot(
+    BucketListBase<BucketT> const& bl, LedgerHeader header)
     : mHeader(std::move(header))
 {
     releaseAssert(threadIsMain());
 
-    for (uint32_t i = 0; i < BucketList::kNumLevels; ++i)
+    for (uint32_t i = 0; i < BucketListBase<BucketT>::kNumLevels; ++i)
     {
         auto const& level = bl.getLevel(i);
-        mLevels.emplace_back(BucketLevelSnapshot(level));
+        mLevels.emplace_back(BucketLevelSnapshot<BucketT>(level));
     }
 }
 
-BucketListSnapshot::BucketListSnapshot(BucketListSnapshot const& snapshot)
+template <class BucketT>
+BucketListSnapshot<BucketT>::BucketListSnapshot(
+    BucketListSnapshot<BucketT> const& snapshot)
     : mLevels(snapshot.mLevels), mHeader(snapshot.mHeader)
 {
 }
 
-std::vector<BucketLevelSnapshot> const&
-BucketListSnapshot::getLevels() const
+template <class BucketT>
+std::vector<BucketLevelSnapshot<BucketT>> const&
+BucketListSnapshot<BucketT>::getLevels() const
 {
     return mLevels;
 }
 
+template <class BucketT>
 uint32_t
-BucketListSnapshot::getLedgerSeq() const
+BucketListSnapshot<BucketT>::getLedgerSeq() const
 {
     return mHeader.ledgerSeq;
 }
 
-// Loops through all buckets in the given snapshot, starting with curr at level
-// 0, then snap at level 0, etc. Calls f on each bucket. Exits early if function
-// returns true
-namespace
+template <class BucketT>
+LedgerHeader const&
+SearchableBucketListSnapshotBase<BucketT>::getLedgerHeader()
 {
+    releaseAssert(mSnapshot);
+    mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots);
+    return mSnapshot->getLedgerHeader();
+}
+
+template <class BucketT>
 void
-loopAllBuckets(std::function<bool(BucketSnapshot const&)> f,
-               BucketListSnapshot const& snapshot)
+SearchableBucketListSnapshotBase<BucketT>::loopAllBuckets(
+    std::function<bool(BucketSnapshotT const&)> f,
+    BucketListSnapshot<BucketT> const& snapshot) const
 {
     for (auto const& lev : snapshot.getLevels())
     {
         // Return true if we should exit loop early
-        auto processBucket = [f](BucketSnapshot const& b) {
+        auto processBucket = [f](BucketSnapshotT const& b) {
             if (b.isEmpty())
             {
                 return false;
@@ -71,72 +86,8 @@ loopAllBuckets(std::function<bool(BucketSnapshot const&)> f,
     }
 }
 
-// Loads bucket entry for LedgerKey k. Returns <LedgerEntry, bloomMiss>,
-// where bloomMiss is true if a bloom miss occurred during the load.
-std::pair<std::shared_ptr<LedgerEntry>, bool>
-getLedgerEntryInternal(LedgerKey const& k, BucketListSnapshot const& snapshot)
-{
-    std::shared_ptr<LedgerEntry> result{};
-    auto sawBloomMiss = false;
-
-    auto f = [&](BucketSnapshot const& b) {
-        auto [be, bloomMiss] = b.getBucketEntry(k);
-        sawBloomMiss = sawBloomMiss || bloomMiss;
-
-        if (be.has_value())
-        {
-            result =
-                be.value().type() == DEADENTRY
-                    ? nullptr
-                    : std::make_shared<LedgerEntry>(be.value().liveEntry());
-            return true;
-        }
-        else
-        {
-            return false;
-        }
-    };
-
-    loopAllBuckets(f, snapshot);
-    return {result, sawBloomMiss};
-}
-
-std::vector<LedgerEntry>
-loadKeysInternal(std::set<LedgerKey, LedgerEntryIdCmp> const& inKeys,
-                 BucketListSnapshot const& snapshot, LedgerKeyMeter* lkMeter)
-{
-    std::vector<LedgerEntry> entries;
-
-    // Make a copy of the key set, this loop is destructive
-    auto keys = inKeys;
-    auto f = [&](BucketSnapshot const& b) {
-        b.loadKeysWithLimits(keys, entries, lkMeter);
-        return keys.empty();
-    };
-
-    loopAllBuckets(f, snapshot);
-    return entries;
-}
-
-}
-
-uint32_t
-SearchableBucketListSnapshot::getLedgerSeq() const
-{
-    releaseAssert(mSnapshot);
-    return mSnapshot->getLedgerSeq();
-}
-
-LedgerHeader const&
-SearchableBucketListSnapshot::getLedgerHeader()
-{
-    releaseAssert(mSnapshot);
-    mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots);
-    return mSnapshot->getLedgerHeader();
-}
-
 EvictionResult
-SearchableBucketListSnapshot::scanForEviction(
+SearchableLiveBucketListSnapshot::scanForEviction(
     uint32_t ledgerSeq, EvictionCounters& counters,
     EvictionIterator evictionIter, std::shared_ptr<EvictionStatistics> stats,
     StateArchivalSettings const& sas)
@@ -146,12 +97,12 @@ SearchableBucketListSnapshot::scanForEviction(
 
     auto getBucketFromIter =
         [&levels = mSnapshot->getLevels()](
-            EvictionIterator const& iter) -> BucketSnapshot const& {
+            EvictionIterator const& iter) -> LiveBucketSnapshot const& {
         auto& level = levels.at(iter.bucketListLevel);
         return iter.isCurrBucket ? level.curr : level.snap;
     };
 
-    BucketList::updateStartingEvictionIterator(
+    LiveBucketList::updateStartingEvictionIterator(
         evictionIter, sas.startingEvictionScanLevel, ledgerSeq);
 
     EvictionResult result(sas);
@@ -161,7 +112,7 @@ SearchableBucketListSnapshot::scanForEviction(
     for (;;)
     {
         auto const& b = getBucketFromIter(evictionIter);
-        BucketList::checkIfEvictionScanIsStuck(
+        LiveBucketList::checkIfEvictionScanIsStuck(
             evictionIter, sas.evictionScanSize, b.getRawBucket(), counters);
 
         // If we scan scanSize before hitting bucket EOF, exit early
@@ -172,7 +123,7 @@ SearchableBucketListSnapshot::scanForEviction(
         }
 
         // If we return back to the Bucket we started at, exit
-        if (BucketList::updateEvictionIterAndRecordStats(
+        if (LiveBucketList::updateEvictionIterAndRecordStats(
                 evictionIter, startIter, sas.startingEvictionScanLevel,
                 ledgerSeq, stats, counters))
         {
@@ -185,71 +136,113 @@ SearchableBucketListSnapshot::scanForEviction(
     return result;
 }
 
-std::shared_ptr<LedgerEntry>
-SearchableBucketListSnapshot::load(LedgerKey const& k)
+std::vector<LedgerEntry>
+SearchableLiveBucketListSnapshot::loadKeysWithLimits(
+    std::set<LedgerKey, LedgerEntryIdCmp> const& inKeys,
+    LedgerKeyMeter* lkMeter)
 {
     ZoneScoped;
-    mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots);
-    releaseAssert(mSnapshot);
 
+    // Make a copy of the key set; this loop is destructive.
+    auto keys = inKeys;
+    std::vector<LedgerEntry> entries;
+    auto loadKeysLoop = [&](auto const& b) {
+        b.loadKeys(keys, entries, lkMeter);
+        return keys.empty();
+    };
+
+    mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots);
     if (threadIsMain())
     {
-        mSnapshotManager.startPointLoadTimer();
-        auto [result, bloomMiss] = getLedgerEntryInternal(k, *mSnapshot);
-        mSnapshotManager.endPointLoadTimer(k.type(), bloomMiss);
-        return result;
+        auto timer =
+            mSnapshotManager.recordBulkLoadMetrics("prefetch", inKeys.size())
+                .TimeScope();
+        loopAllBuckets(loadKeysLoop, *mSnapshot);
     }
     else
     {
-        auto [result, bloomMiss] = getLedgerEntryInternal(k, *mSnapshot);
-        return result;
+        // TODO: Background metrics
+        loopAllBuckets(loadKeysLoop, *mSnapshot);
     }
+
+    return entries;
 }
 
-std::pair<std::vector<LedgerEntry>, bool>
-SearchableBucketListSnapshot::loadKeysFromLedger(
+std::optional<std::vector<LedgerEntry>>
+SearchableLiveBucketListSnapshot::loadKeysFromLedger(
     std::set<LedgerKey, LedgerEntryIdCmp> const& inKeys, uint32_t ledgerSeq)
 {
     ZoneScoped;
+
+    // Make a copy of the key set; this loop is destructive.
+    auto keys = inKeys;
+    std::vector<LedgerEntry> entries;
+    auto loadKeysLoop = [&](auto const& b) {
+        b.loadKeys(keys, entries, /*lkMeter=*/nullptr);
+        return keys.empty();
+    };
+
     mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots);
-    releaseAssert(mSnapshot);
 
     if (ledgerSeq == mSnapshot->getLedgerSeq())
     {
-        auto result = loadKeysInternal(inKeys, *mSnapshot, /*lkMeter=*/nullptr);
-        return {result, true};
+        loopAllBuckets(loadKeysLoop, *mSnapshot);
     }
-
-    auto iter = mHistoricalSnapshots.find(ledgerSeq);
-    if (iter == mHistoricalSnapshots.end())
+    else
     {
-        return {{}, false};
+        auto iter = mHistoricalSnapshots.find(ledgerSeq);
+        if (iter == mHistoricalSnapshots.end())
+        {
+            return std::nullopt;
+        }
+
+        releaseAssert(iter->second);
+        loopAllBuckets(loadKeysLoop, *iter->second);
     }
 
-    releaseAssert(iter->second);
-    auto result = loadKeysInternal(inKeys, *iter->second, /*lkMeter=*/nullptr);
-    return {result, true};
+    return entries;
 }
 
-std::vector<LedgerEntry>
-SearchableBucketListSnapshot::loadKeysWithLimits(
-    std::set<LedgerKey, LedgerEntryIdCmp> const& inKeys,
-    LedgerKeyMeter* lkMeter)
+std::shared_ptr<LedgerEntry>
+SearchableLiveBucketListSnapshot::load(LedgerKey const& k)
 {
     ZoneScoped;
-    mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots);
-    releaseAssert(mSnapshot);
 
+    std::shared_ptr<LedgerEntry> result{};
+    auto sawBloomMiss = false;
+
+    // Search function called on each Bucket in BucketList until we find the key
+    auto loadKeyBucketLoop = [&](auto const& b) {
+        auto [be, bloomMiss] = b.getBucketEntry(k);
+        sawBloomMiss = sawBloomMiss || bloomMiss;
+
+        if (be)
+        {
+            result = LiveBucket::isTombstoneEntry(*be)
+                         ? nullptr
+                         : std::make_shared<LedgerEntry>(be->liveEntry());
+
+            return true;
+        }
+        else
+        {
+            return false;
+        }
+    };
+
+    mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots);
     if (threadIsMain())
     {
-        auto timer =
-            mSnapshotManager.recordBulkLoadMetrics("prefetch", inKeys.size())
-                .TimeScope();
-        return loadKeysInternal(inKeys, *mSnapshot, lkMeter);
+        mSnapshotManager.startPointLoadTimer();
+        loopAllBuckets(loadKeyBucketLoop, *mSnapshot);
+        mSnapshotManager.endPointLoadTimer(k.type(), sawBloomMiss);
+        return result;
     }
     else
     {
-        return loadKeysInternal(inKeys, *mSnapshot, lkMeter);
+        // TODO: Background metrics
+        loopAllBuckets(loadKeyBucketLoop, *mSnapshot);
+        return result;
     }
 }
 
@@ -259,7 +252,7 @@ SearchableBucketListSnapshot::loadKeysWithLimits(
 //  2. Perform a bulk lookup for all possible trustline keys, that is, all
 //     trustlines with the given accountID and poolID from step 1
 std::vector<LedgerEntry>
-SearchableBucketListSnapshot::loadPoolShareTrustLinesByAccountAndAsset(
+SearchableLiveBucketListSnapshot::loadPoolShareTrustLinesByAccountAndAsset(
     AccountID const& accountID, Asset const& asset)
 {
     ZoneScoped;
@@ -271,7 +264,8 @@ SearchableBucketListSnapshot::loadPoolShareTrustLinesByAccountAndAsset(
 
     LedgerKeySet trustlinesToLoad;
 
-    auto trustLineLoop = [&](BucketSnapshot const& b) {
+    auto trustLineLoop = [&](auto const& rawB) {
+        auto const& b = static_cast<LiveBucketSnapshot const&>(rawB);
         for (auto const& poolID : b.getPoolIDsByAsset(asset))
         {
             LedgerKey trustlineKey(TRUSTLINE);
@@ -290,12 +284,20 @@ SearchableBucketListSnapshot::loadPoolShareTrustLinesByAccountAndAsset(
                      .recordBulkLoadMetrics("poolshareTrustlines",
                                             trustlinesToLoad.size())
                      .TimeScope();
-    return loadKeysInternal(trustlinesToLoad, *mSnapshot, nullptr);
+
+    std::vector<LedgerEntry> result;
+    auto loadKeysLoop = [&](auto const& b) {
+        b.loadKeys(trustlinesToLoad, result, /*lkMeter=*/nullptr);
+        return trustlinesToLoad.empty();
+    };
+
+    loopAllBuckets(loadKeysLoop, *mSnapshot);
+    return result;
 }
 
 std::vector<InflationWinner>
-SearchableBucketListSnapshot::loadInflationWinners(size_t maxWinners,
-                                                   int64_t minBalance)
+SearchableLiveBucketListSnapshot::loadInflationWinners(size_t maxWinners,
+                                                       int64_t minBalance)
 {
     ZoneScoped;
     mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots);
@@ -310,8 +312,8 @@ SearchableBucketListSnapshot::loadInflationWinners(size_t maxWinners,
     UnorderedMap<AccountID, int64_t> voteCount;
     UnorderedSet<AccountID> seen;
 
-    auto countVotesInBucket = [&](BucketSnapshot const& b) {
-        for (BucketInputIterator in(b.getRawBucket()); in; ++in)
+    auto countVotesInBucket = [&](LiveBucketSnapshot const& b) {
+        for (LiveBucketInputIterator in(b.getRawBucket()); in; ++in)
         {
             BucketEntry const& be = *in;
             if (be.type() == DEADENTRY)
@@ -386,17 +388,114 @@ SearchableBucketListSnapshot::loadInflationWinners(size_t maxWinners,
     return winners;
 }
 
-BucketLevelSnapshot::BucketLevelSnapshot(BucketLevel const& level)
+template <class BucketT>
+BucketLevelSnapshot<BucketT>::BucketLevelSnapshot(
+    BucketLevel<BucketT> const& level)
     : curr(level.getCurr()), snap(level.getSnap())
 {
 }
 
-SearchableBucketListSnapshot::SearchableBucketListSnapshot(
+template <class BucketT>
+SearchableBucketListSnapshotBase<BucketT>::SearchableBucketListSnapshotBase(
     BucketSnapshotManager const& snapshotManager)
     : mSnapshotManager(snapshotManager), mHistoricalSnapshots()
 {
-    // Initialize snapshot from SnapshotManager
     mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots);
 }
 
+template <class BucketT>
+SearchableBucketListSnapshotBase<BucketT>::~SearchableBucketListSnapshotBase()
+{
+}
+
+SearchableLiveBucketListSnapshot::SearchableLiveBucketListSnapshot(
+    BucketSnapshotManager const& snapshotManager)
+    : SearchableBucketListSnapshotBase<LiveBucket>(snapshotManager)
+{
+}
+
+SearchableHotArchiveBucketListSnapshot::SearchableHotArchiveBucketListSnapshot(
+    BucketSnapshotManager const& snapshotManager)
+    : SearchableBucketListSnapshotBase<HotArchiveBucket>(snapshotManager)
+{
+}
+
+std::shared_ptr<HotArchiveBucketEntry>
+SearchableHotArchiveBucketListSnapshot::load(LedgerKey const& k)
+{
+    ZoneScoped;
+
+    // Search function called on each Bucket in BucketList until we find the key
+    std::shared_ptr<HotArchiveBucketEntry> result{};
+    auto loadKeyBucketLoop = [&](auto const& b) {
+        auto [be, _] = b.getBucketEntry(k);
+
+        if (be)
+        {
+            result = HotArchiveBucket::isTombstoneEntry(*be) ? nullptr : be;
+            return true;
+        }
+        else
+        {
+            return false;
+        }
+    };
+
+    // TODO: Metrics
+    mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots);
+    loopAllBuckets(loadKeyBucketLoop, *mSnapshot);
+    return result;
+}
+
+std::vector<HotArchiveBucketEntry>
+SearchableHotArchiveBucketListSnapshot::loadKeys(
+    std::set<LedgerKey, LedgerEntryIdCmp> const& inKeys)
+{
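+    // Note: we pass our own current ledgerSeq, so the snapshot lookup below
+    // cannot miss and loadKeysFromLedger never returns std::nullopt here.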
+    auto op = loadKeysFromLedger(inKeys, getLedgerSeq());
+    releaseAssertOrThrow(op);
+    return std::move(*op);
+}
+
+std::optional<std::vector<HotArchiveBucketEntry>>
+SearchableHotArchiveBucketListSnapshot::loadKeysFromLedger(
+    std::set<LedgerKey, LedgerEntryIdCmp> const& inKeys, uint32_t ledgerSeq)
+{
+    ZoneScoped;
+    std::vector<HotArchiveBucketEntry> entries;
+
+    // Make a copy of the key set; this loop is destructive.
+    auto keys = inKeys;
+    auto loadKeysLoop = [&](auto const& b) {
+        b.loadKeys(keys, entries, /*lkMeter=*/nullptr);
+        return keys.empty();
+    };
+
+    mSnapshotManager.maybeUpdateSnapshot(mSnapshot, mHistoricalSnapshots);
+
+    if (ledgerSeq == mSnapshot->getLedgerSeq())
+    {
+        loopAllBuckets(loadKeysLoop, *mSnapshot);
+    }
+    else
+    {
+        auto iter = mHistoricalSnapshots.find(ledgerSeq);
+        if (iter == mHistoricalSnapshots.end())
+        {
+            return std::nullopt;
+        }
+
+        releaseAssert(iter->second);
+        loopAllBuckets(loadKeysLoop, *iter->second);
+    }
+
+    return entries;
+}
+
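+// Explicit template instantiations: the template definitions above live in
+// this translation unit, so each supported BucketT must be instantiated here
+// for the linker to find the symbols.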
+template struct BucketLevelSnapshot<LiveBucket>;
+template struct BucketLevelSnapshot<HotArchiveBucket>;
+template class BucketListSnapshot<LiveBucket>;
+template class BucketListSnapshot<HotArchiveBucket>;
+template class SearchableBucketListSnapshotBase<LiveBucket>;
+template class SearchableBucketListSnapshotBase<HotArchiveBucket>;
 }
\ No newline at end of file
diff --git a/src/bucket/BucketListSnapshot.h b/src/bucket/BucketListSnapshot.h
index ea14869f3a..c4cd98450f 100644
--- a/src/bucket/BucketListSnapshot.h
+++ b/src/bucket/BucketListSnapshot.h
@@ -4,6 +4,7 @@
 // under the Apache License, Version 2.0. See the COPYING file at the root
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
 
+#include "bucket/Bucket.h"
 #include "bucket/BucketList.h"
 #include "bucket/BucketManagerImpl.h"
 #include "bucket/BucketSnapshot.h"
@@ -17,30 +18,43 @@ class Timer;
 namespace stellar
 {
 
-struct BucketLevelSnapshot
+template <class BucketT> struct BucketLevelSnapshot
 {
-    BucketSnapshot curr;
-    BucketSnapshot snap;
+    static_assert(std::is_same_v<BucketT, LiveBucket> ||
+                  std::is_same_v<BucketT, HotArchiveBucket>);
 
-    BucketLevelSnapshot(BucketLevel const& level);
+    using BucketSnapshotT =
+        std::conditional_t<std::is_same_v<BucketT, LiveBucket>,
+                           LiveBucketSnapshot, HotArchiveBucketSnapshot>;
+
+    BucketSnapshotT curr;
+    BucketSnapshotT snap;
+
+    BucketLevelSnapshot(BucketLevel<BucketT> const& level);
 };
 
-class BucketListSnapshot : public NonMovable
+template <class BucketT> class BucketListSnapshot : public NonMovable
 {
+    static_assert(std::is_same_v<BucketT, LiveBucket> ||
+                  std::is_same_v<BucketT, HotArchiveBucket>);
+    using BucketSnapshotT =
+        std::conditional_t<std::is_same_v<BucketT, LiveBucket>,
+                           LiveBucketSnapshot, HotArchiveBucketSnapshot>;
+
   private:
-    std::vector<BucketLevelSnapshot> mLevels;
+    std::vector<BucketLevelSnapshot<BucketT>> mLevels;
 
     // LedgerHeader associated with this ledger state snapshot
     LedgerHeader const mHeader;
 
   public:
-    BucketListSnapshot(BucketList const& bl, LedgerHeader hhe);
+    BucketListSnapshot(BucketListBase<BucketT> const& bl, LedgerHeader hhe);
 
     // Only allow copies via constructor
     BucketListSnapshot(BucketListSnapshot const& snapshot);
     BucketListSnapshot& operator=(BucketListSnapshot const&) = delete;
 
-    std::vector<BucketLevelSnapshot> const& getLevels() const;
+    std::vector<BucketLevelSnapshot<BucketT>> const& getLevels() const;
     uint32_t getLedgerSeq() const;
     LedgerHeader const&
     getLedgerHeader() const
@@ -58,21 +72,54 @@ class BucketListSnapshot : public NonMovable
 // instance will check that the current snapshot is up to date via the
 // BucketListSnapshotManager and will be refreshed accordingly. Callers can
 // assume SearchableBucketListSnapshot is always up to date.
-class SearchableBucketListSnapshot : public NonMovableOrCopyable
+template <class BucketT>
+class SearchableBucketListSnapshotBase : public NonMovableOrCopyable
 {
+    static_assert(std::is_same_v<BucketT, LiveBucket> ||
+                  std::is_same_v<BucketT, HotArchiveBucket>);
+
+    using BucketSnapshotT =
+        std::conditional_t<std::is_same_v<BucketT, LiveBucket>,
+                           LiveBucketSnapshot, HotArchiveBucketSnapshot>;
+
+  protected:
+    virtual ~SearchableBucketListSnapshotBase() = 0;
+
     BucketSnapshotManager const& mSnapshotManager;
 
     // Snapshot managed by SnapshotManager
-    std::unique_ptr<BucketListSnapshot const> mSnapshot{};
-    std::map<uint32_t, std::unique_ptr<BucketListSnapshot const>>
+    std::unique_ptr<BucketListSnapshot<BucketT> const> mSnapshot{};
+    std::map<uint32_t, std::unique_ptr<BucketListSnapshot<BucketT> const>>
         mHistoricalSnapshots;
 
-    SearchableBucketListSnapshot(BucketSnapshotManager const& snapshotManager);
+    // Loops through all buckets, starting with curr at level 0, then snap at
+    // level 0, etc. Calls f on each bucket. Exits early if f returns true.
+    void loopAllBuckets(std::function<bool(BucketSnapshotT const&)> f,
+                        BucketListSnapshot<BucketT> const& snapshot) const;
+
+    SearchableBucketListSnapshotBase(
+        BucketSnapshotManager const& snapshotManager);
+
+  public:
+    uint32_t
+    getLedgerSeq() const
+    {
+        return mSnapshot->getLedgerSeq();
+    }
+
+    LedgerHeader const& getLedgerHeader();
+};
 
-    friend std::shared_ptr<SearchableBucketListSnapshot>
-    BucketSnapshotManager::copySearchableBucketListSnapshot() const;
+class SearchableLiveBucketListSnapshot
+    : public SearchableBucketListSnapshotBase<LiveBucket>
+{
+    SearchableLiveBucketListSnapshot(
+        BucketSnapshotManager const& snapshotManager);
 
   public:
+    std::shared_ptr<LedgerEntry> load(LedgerKey const& k);
+
     std::vector<LedgerEntry>
     loadKeysWithLimits(std::set<LedgerKey, LedgerEntryIdCmp> const& inKeys,
                        LedgerKeyMeter* lkMeter = nullptr);
@@ -84,15 +131,13 @@ class SearchableBucketListSnapshot : public NonMovableOrCopyable
     std::vector<InflationWinner> loadInflationWinners(size_t maxWinners,
                                                       int64_t minBalance);
 
-    std::shared_ptr<LedgerEntry> load(LedgerKey const& k);
-
     // Loads inKeys from the specified historical snapshot. Returns
-    // <load_result_vec, true> if the snapshot for the given ledger is
-    // available,  <empty_vec, false> otherwise. Note that ledgerSeq is defined
+    // load_result_vec if the snapshot for the given ledger is
+    // available, std::nullopt otherwise. Note that ledgerSeq is defined
     // as the state of the BucketList at the beginning of the ledger. This means
     // that for ledger N, the maximum lastModifiedLedgerSeq of any LedgerEntry
     // in the BucketList is N - 1.
-    std::pair<std::vector<LedgerEntry>, bool>
+    std::optional<std::vector<LedgerEntry>>
     loadKeysFromLedger(std::set<LedgerKey, LedgerEntryIdCmp> const& inKeys,
                        uint32_t ledgerSeq);
 
@@ -101,7 +146,34 @@ class SearchableBucketListSnapshot : public NonMovableOrCopyable
                                    EvictionIterator evictionIter,
                                    std::shared_ptr<EvictionStatistics> stats,
                                    StateArchivalSettings const& sas);
-    uint32_t getLedgerSeq() const;
-    LedgerHeader const& getLedgerHeader();
+
+    friend std::shared_ptr<SearchableLiveBucketListSnapshot>
+    BucketSnapshotManager::copySearchableLiveBucketListSnapshot() const;
+};
+
+class SearchableHotArchiveBucketListSnapshot
+    : public SearchableBucketListSnapshotBase<HotArchiveBucket>
+{
+    SearchableHotArchiveBucketListSnapshot(
+        BucketSnapshotManager const& snapshotManager);
+
+  public:
+    std::shared_ptr<HotArchiveBucketEntry> load(LedgerKey const& k);
+
+    std::vector<HotArchiveBucketEntry>
+    loadKeys(std::set<LedgerKey, LedgerEntryIdCmp> const& inKeys);
+
+    // Loads inKeys from the specified historical snapshot. Returns
+    // load_result_vec if the snapshot for the given ledger is
+    // available, std::nullopt otherwise. Note that ledgerSeq is defined
+    // as the state of the BucketList at the beginning of the ledger. This means
+    // that for ledger N, the maximum lastModifiedLedgerSeq of any LedgerEntry
+    // in the BucketList is N - 1.
+    std::optional<std::vector<HotArchiveBucketEntry>>
+    loadKeysFromLedger(std::set<LedgerKey, LedgerEntryIdCmp> const& inKeys,
+                       uint32_t ledgerSeq);
+
+    friend std::shared_ptr<SearchableHotArchiveBucketListSnapshot>
+    BucketSnapshotManager::copySearchableHotArchiveBucketListSnapshot() const;
 };
 }
\ No newline at end of file
diff --git a/src/bucket/BucketManager.h b/src/bucket/BucketManager.h
index a64bd9181f..471d410c5a 100644
--- a/src/bucket/BucketManager.h
+++ b/src/bucket/BucketManager.h
@@ -26,10 +26,10 @@ namespace stellar
 class AbstractLedgerTxn;
 class Application;
 class BasicWork;
-class BucketList;
+class LiveBucketList;
+class HotArchiveBucketList;
 class BucketSnapshotManager;
 class Config;
-class SearchableBucketListSnapshot;
 class TmpDirManager;
 struct HistoryArchiveState;
 struct InflationWinner;
@@ -192,7 +192,8 @@ class BucketManager : NonMovableOrCopyable
     virtual std::string const& getTmpDir() = 0;
     virtual TmpDirManager& getTmpDirManager() = 0;
     virtual std::string const& getBucketDir() const = 0;
-    virtual BucketList& getBucketList() = 0;
+    virtual LiveBucketList& getLiveBucketList() = 0;
+    virtual HotArchiveBucketList& getHotArchiveBucketList() = 0;
     virtual BucketSnapshotManager& getBucketSnapshotManager() const = 0;
     virtual bool renameBucketDirFile(std::filesystem::path const& src,
                                      std::filesystem::path const& dst) = 0;
@@ -215,12 +216,20 @@ class BucketManager : NonMovableOrCopyable
     // This method is mostly-threadsafe -- assuming you don't destruct the
     // BucketManager mid-call -- and is intended to be called from both main and
     // worker threads. Very carefully.
-    virtual std::shared_ptr<Bucket>
-    adoptFileAsBucket(std::string const& filename, uint256 const& hash,
-                      MergeKey* mergeKey,
-                      std::unique_ptr<BucketIndex const> index) = 0;
-
-    // Companion method to `adoptFileAsBucket` also called from the
+    virtual std::shared_ptr<LiveBucket>
+    adoptFileAsLiveBucket(std::string const& filename, uint256 const& hash,
+                          MergeKey* mergeKey,
+                          std::unique_ptr<BucketIndex const> index) = 0;
+    virtual std::shared_ptr<HotArchiveBucket>
+    adoptFileAsHotArchiveBucket(std::string const& filename,
+                                uint256 const& hash, MergeKey* mergeKey,
+                                std::unique_ptr<BucketIndex const> index) = 0;
+    virtual std::shared_ptr<ColdArchiveBucket>
+    adoptFileAsPendingColdArchiveBucket(
+        std::string const& filename, uint256 const& hash,
+        std::unique_ptr<BucketIndex const> index, uint32_t epoch) = 0;
+
+    // Companion method to `adoptFileAsLiveBucket` also called from the
     // `BucketOutputIterator::getBucket` merge-completion path. This method
     // however should be called when the output bucket is _empty_ and thereby
     // doesn't correspond to a file on disk; the method forgets about the
@@ -233,15 +242,26 @@ class BucketManager : NonMovableOrCopyable
     virtual std::shared_ptr<Bucket> getBucketIfExists(uint256 const& hash) = 0;
 
     // Return a bucket by hash if we have it, else return nullptr.
-    virtual std::shared_ptr<Bucket> getBucketByHash(uint256 const& hash) = 0;
+    virtual std::shared_ptr<LiveBucket>
+    getLiveBucketByHash(uint256 const& hash) = 0;
+    virtual std::shared_ptr<HotArchiveBucket>
+    getHotArchiveBucketByHash(uint256 const& hash) = 0;
+
+    // If a pending cold archive bucket exists for the given epoch on disk,
+    // this method will build a Bucket object based on the file and return it.
+    // Otherwise, returns an empty Bucket.
+    virtual std::shared_ptr<ColdArchiveBucket>
+    getPendingColdArchiveBucketByEpoch(uint32_t epoch) = 0;
 
     // Get a reference to a merge-future that's either running (or finished
     // somewhat recently) from either a map of the std::shared_futures doing the
     // merges and/or a set of records mapping merge inputs to outputs and the
     // set of outputs held in the BucketManager. Returns an invalid future if no
     // such future can be found or synthesized.
-    virtual std::shared_future<std::shared_ptr<Bucket>>
-    getMergeFuture(MergeKey const& key) = 0;
+    virtual std::shared_future<std::shared_ptr<LiveBucket>>
+    getLiveMergeFuture(MergeKey const& key) = 0;
+    virtual std::shared_future<std::shared_ptr<HotArchiveBucket>>
+    getHotArchiveMergeFuture(MergeKey const& key) = 0;
 
     // Add a reference to a merge _in progress_ (not yet adopted as a file) to
     // the BucketManager's internal map of std::shared_futures doing merges.
@@ -249,8 +269,11 @@ class BucketManager : NonMovableOrCopyable
     // be removed from the map when the merge completes and the output file is
     // adopted.
     virtual void
-    putMergeFuture(MergeKey const& key,
-                   std::shared_future<std::shared_ptr<Bucket>>) = 0;
+    putLiveMergeFuture(MergeKey const& key,
+                       std::shared_future<std::shared_ptr<LiveBucket>>) = 0;
+    virtual void putHotArchiveMergeFuture(
+        MergeKey const& key,
+        std::shared_future<std::shared_ptr<HotArchiveBucket>>) = 0;
 
 #ifdef BUILD_TESTS
     // Drop all references to merge futures in progress.
@@ -267,10 +290,15 @@ class BucketManager : NonMovableOrCopyable
     // be given separate init (created) and live (updated) entry vectors. The
     // `header` value should be taken from the ledger at which this batch is
     // being added.
-    virtual void addBatch(Application& app, LedgerHeader header,
-                          std::vector<LedgerEntry> const& initEntries,
-                          std::vector<LedgerEntry> const& liveEntries,
-                          std::vector<LedgerKey> const& deadEntries) = 0;
+    virtual void addLiveBatch(Application& app, LedgerHeader header,
+                              std::vector<LedgerEntry> const& initEntries,
+                              std::vector<LedgerEntry> const& liveEntries,
+                              std::vector<LedgerKey> const& deadEntries) = 0;
+    virtual void
+    addHotArchiveBatch(Application& app, LedgerHeader header,
+                       std::vector<LedgerEntry> const& archivedEntries,
+                       std::vector<LedgerKey> const& restoredEntries,
+                       std::vector<LedgerKey> const& deletedEntries) = 0;
 
     // Update the given LedgerHeader's bucketListHash to reflect the current
     // state of the bucket list.
@@ -287,9 +315,6 @@ class BucketManager : NonMovableOrCopyable
     // Scans BucketList for non-live entries to evict starting at the entry
     // pointed to by EvictionIterator. Scans until `maxEntriesToEvict` entries
     // have been evicted or maxEvictionScanSize bytes have been scanned.
-    virtual void scanForEvictionLegacy(AbstractLedgerTxn& ltx,
-                                       uint32_t ledgerSeq) = 0;
-
     virtual void startBackgroundEvictionScan(uint32_t ledgerSeq) = 0;
     virtual void
     resolveBackgroundEvictionScan(AbstractLedgerTxn& ltx, uint32_t ledgerSeq,
@@ -300,7 +325,7 @@ class BucketManager : NonMovableOrCopyable
 
 #ifdef BUILD_TESTS
     // Install a fake/assumed ledger version and bucket list hash to use in next
-    // call to addBatch and snapshotLedger. This interface exists only for
+    // call to addLiveBatch and snapshotLedger. This interface exists only for
     // testing in a specific type of history replay.
     virtual void setNextCloseVersionAndHashForTesting(uint32_t protocolVers,
                                                       uint256 const& hash) = 0;
@@ -349,7 +374,7 @@ class BucketManager : NonMovableOrCopyable
 
     // Merge the bucket list of the provided HAS into a single "super bucket"
     // consisting of only live entries, and return it.
-    virtual std::shared_ptr<Bucket>
+    virtual std::shared_ptr<LiveBucket>
     mergeBuckets(HistoryArchiveState const& has) = 0;
 
     // Visits all the active ledger entries or subset thereof.
@@ -383,8 +408,9 @@ class BucketManager : NonMovableOrCopyable
     virtual Config const& getConfig() const = 0;
 
     // Get bucketlist snapshot
-    virtual std::shared_ptr<SearchableBucketListSnapshot>
-    getSearchableBucketListSnapshot() = 0;
+    virtual std::shared_ptr<SearchableLiveBucketListSnapshot>
+    getSearchableLiveBucketListSnapshot() = 0;
+
     virtual void reportBucketEntryCountMetrics() = 0;
 };
 }
diff --git a/src/bucket/BucketManagerImpl.cpp b/src/bucket/BucketManagerImpl.cpp
index a953bef124..077213d7fb 100644
--- a/src/bucket/BucketManagerImpl.cpp
+++ b/src/bucket/BucketManagerImpl.cpp
@@ -12,6 +12,7 @@
 #include "bucket/BucketSnapshotManager.h"
 #include "crypto/BLAKE2.h"
 #include "crypto/Hex.h"
+#include "crypto/SHA.h"
 #include "history/HistoryManager.h"
 #include "historywork/VerifyBucketWork.h"
 #include "ledger/LedgerManager.h"
@@ -23,6 +24,7 @@
 #include "util/GlobalChecks.h"
 #include "util/LogSlowExecution.h"
 #include "util/Logging.h"
+#include "util/ProtocolVersion.h"
 #include "util/TmpDir.h"
 #include "util/types.h"
 #include "xdr/Stellar-ledger.h"
@@ -30,6 +32,7 @@
 #include <fmt/chrono.h>
 #include <fmt/format.h>
 #include <map>
+#include <memory>
 #include <regex>
 #include <set>
 #include <thread>
@@ -41,6 +44,7 @@
 #include "work/WorkScheduler.h"
 #include "xdrpp/printer.h"
 #include <Tracy.hpp>
+#include <type_traits>
 
 namespace stellar
 {
@@ -123,16 +127,15 @@ BucketManagerImpl::initialize()
 
     if (mApp.getConfig().MODE_ENABLES_BUCKETLIST)
     {
-        mBucketList = std::make_unique<BucketList>();
-
-        if (mApp.getConfig().isUsingBucketListDB())
-        {
-            mSnapshotManager = std::make_unique<BucketSnapshotManager>(
-                mApp,
-                std::make_unique<BucketListSnapshot>(*mBucketList,
-                                                     LedgerHeader()),
-                mApp.getConfig().QUERY_SNAPSHOT_LEDGERS);
-        }
+        mLiveBucketList = std::make_unique<LiveBucketList>();
+        mHotArchiveBucketList = std::make_unique<HotArchiveBucketList>();
+        mSnapshotManager = std::make_unique<BucketSnapshotManager>(
+            mApp,
+            std::make_unique<BucketListSnapshot<LiveBucket>>(*mLiveBucketList,
+                                                             LedgerHeader()),
+            std::make_unique<BucketListSnapshot<HotArchiveBucket>>(
+                *mHotArchiveBucketList, LedgerHeader()),
+            mApp.getConfig().QUERY_SNAPSHOT_LEDGERS);
     }
 }
 
@@ -166,14 +169,20 @@ EvictionCounters::EvictionCounters(Application& app)
 
 BucketManagerImpl::BucketManagerImpl(Application& app)
     : mApp(app)
-    , mBucketList(nullptr)
+    , mLiveBucketList(nullptr)
+    , mHotArchiveBucketList(nullptr)
     , mSnapshotManager(nullptr)
     , mTmpDirManager(nullptr)
     , mWorkDir(nullptr)
     , mLockedBucketDir(nullptr)
-    , mBucketObjectInsertBatch(app.getMetrics().NewMeter(
+    , mBucketLiveObjectInsertBatch(app.getMetrics().NewMeter(
           {"bucket", "batch", "objectsadded"}, "object"))
-    , mBucketAddBatch(app.getMetrics().NewTimer({"bucket", "batch", "addtime"}))
+    , mBucketArchiveObjectInsertBatch(app.getMetrics().NewMeter(
+          {"bucket", "batch-archive", "objectsadded"}, "object"))
+    , mBucketAddLiveBatch(
+          app.getMetrics().NewTimer({"bucket", "batch", "addtime"}))
+    , mBucketAddArchiveBatch(
+          app.getMetrics().NewTimer({"bucket", "batch-archive", "addtime"}))
     , mBucketSnapMerge(app.getMetrics().NewTimer({"bucket", "snap", "merge"}))
     , mSharedBucketsSize(
           app.getMetrics().NewCounter({"bucket", "memory", "shared"}))
@@ -181,14 +190,12 @@ BucketManagerImpl::BucketManagerImpl(Application& app)
           {"bucketlistDB", "bloom", "misses"}, "bloom"))
     , mBucketListDBBloomLookups(app.getMetrics().NewMeter(
           {"bucketlistDB", "bloom", "lookups"}, "bloom"))
-    , mBucketListSizeCounter(
+    , mLiveBucketListSizeCounter(
           app.getMetrics().NewCounter({"bucketlist", "size", "bytes"}))
+    , mArchiveBucketListSizeCounter(
+          app.getMetrics().NewCounter({"bucketlist-archive", "size", "bytes"}))
     , mBucketListEvictionCounters(app)
     , mEvictionStatistics(std::make_shared<EvictionStatistics>())
-    // Minimal DB is stored in the buckets dir, so delete it only when
-    // mode does not use minimal DB
-    , mDeleteEntireBucketDirInDtor(
-          app.getConfig().isInMemoryModeWithoutMinimalDB())
 {
     for (uint32_t t =
              static_cast<uint32_t>(LedgerEntryTypeAndDurability::ACCOUNT);
@@ -211,9 +218,19 @@ const std::string BucketManagerImpl::kLockFilename = "stellar-core.lock";
 namespace
 {
 std::string
-bucketBasename(std::string const& bucketHexHash)
+bucketBasename(std::string const& bucketHexHash, std::optional<uint32_t> epoch)
 {
-    return "bucket-" + bucketHexHash + ".xdr";
+    std::string prefix;
+    if (epoch)
+    {
+        prefix = fmt::format("pending-bucket-{}-", *epoch);
+    }
+    else
+    {
+        prefix = "bucket-";
+    }
+
+    return prefix + bucketHexHash + ".xdr";
 }
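+
+// Illustrative examples of the two basename forms built above (hash
+// abbreviated): bucketBasename(h, std::nullopt) yields "bucket-<hash>.xdr",
+// while bucketBasename(h, 7) yields "pending-bucket-7-<hash>.xdr".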
 
 bool
@@ -223,24 +240,50 @@ isBucketFile(std::string const& name)
     return std::regex_match(name, re);
 };
 
+bool
+isPendingBucketFile(std::string const& name)
+{
+    static std::regex re("^pending-bucket-[0-9]+-[a-z0-9]{64}\\.xdr(\\.gz)?$");
+    return std::regex_match(name, re);
+};
+
+uint32_t
+extractEpochFromPendingBucketFilename(std::string const& name)
+{
+    // pending-bucket-<epoch>-<hash>.xdr
+    // prefix before epoch: pending-bucket-
+    size_t const epochStart = 15;
+    size_t const lengthOfEpoch = name.find('-', epochStart) - epochStart;
+    return std::stoul(name.substr(epochStart, lengthOfEpoch));
+};
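+
+// Illustrative example for the helper above (hypothetical filename):
+// extractEpochFromPendingBucketFilename("pending-bucket-12-<hash>.xdr")
+// returns 12.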
+
 uint256
 extractFromFilename(std::string const& name)
 {
+    // pending-bucket-<epoch>-<hash>.xdr
+    if (name.substr(0, 15) == "pending-bucket-")
+    {
+        return hexToBin256(name.substr(name.find_last_of('-') + 1, 64));
+    }
+
+    // bucket-<hash>.xdr
     return hexToBin256(name.substr(7, 64));
 };
 }
 
 std::string
-BucketManagerImpl::bucketFilename(std::string const& bucketHexHash)
+BucketManagerImpl::bucketFilename(std::string const& bucketHexHash,
+                                  std::optional<uint32_t> epoch)
 {
-    std::string basename = bucketBasename(bucketHexHash);
+    std::string basename = bucketBasename(bucketHexHash, epoch);
     return getBucketDir() + "/" + basename;
 }
 
 std::string
-BucketManagerImpl::bucketFilename(Hash const& hash)
+BucketManagerImpl::bucketFilename(Hash const& hash,
+                                  std::optional<uint32_t> epoch)
 {
-    return bucketFilename(binToHex(hash));
+    return bucketFilename(binToHex(hash), epoch);
 }
 
 std::string
@@ -273,14 +316,7 @@ BucketManagerImpl::getBucketDir() const
 BucketManagerImpl::~BucketManagerImpl()
 {
     ZoneScoped;
-    if (mDeleteEntireBucketDirInDtor)
-    {
-        deleteEntireBucketDir();
-    }
-    else
-    {
-        deleteTmpDirAndUnlockBucketDir();
-    }
+    deleteTmpDirAndUnlockBucketDir();
 }
 
 void
@@ -328,17 +364,23 @@ BucketManagerImpl::deleteTmpDirAndUnlockBucketDir()
     }
 }
 
-BucketList&
-BucketManagerImpl::getBucketList()
+LiveBucketList&
+BucketManagerImpl::getLiveBucketList()
+{
+    releaseAssertOrThrow(mApp.getConfig().MODE_ENABLES_BUCKETLIST);
+    return *mLiveBucketList;
+}
+
+HotArchiveBucketList&
+BucketManagerImpl::getHotArchiveBucketList()
 {
     releaseAssertOrThrow(mApp.getConfig().MODE_ENABLES_BUCKETLIST);
-    return *mBucketList;
+    return *mHotArchiveBucketList;
 }
 
 BucketSnapshotManager&
 BucketManagerImpl::getBucketSnapshotManager() const
 {
-    releaseAssertOrThrow(mApp.getConfig().isUsingBucketListDB());
     releaseAssert(mSnapshotManager);
     return *mSnapshotManager;
 }
@@ -474,10 +516,39 @@ BucketManagerImpl::renameBucketDirFile(std::filesystem::path const& src,
     }
 }
 
-std::shared_ptr<Bucket>
+std::shared_ptr<LiveBucket>
+BucketManagerImpl::adoptFileAsLiveBucket(
+    std::string const& filename, uint256 const& hash, MergeKey* mergeKey,
+    std::unique_ptr<BucketIndex const> index)
+{
+    return adoptFileAsBucket<LiveBucket>(filename, hash, mergeKey,
+                                         std::move(index));
+}
+
+std::shared_ptr<HotArchiveBucket>
+BucketManagerImpl::adoptFileAsHotArchiveBucket(
+    std::string const& filename, uint256 const& hash, MergeKey* mergeKey,
+    std::unique_ptr<BucketIndex const> index)
+{
+    return adoptFileAsBucket<HotArchiveBucket>(filename, hash, mergeKey,
+                                               std::move(index));
+}
+
+std::shared_ptr<ColdArchiveBucket>
+BucketManagerImpl::adoptFileAsPendingColdArchiveBucket(
+    std::string const& filename, uint256 const& hash,
+    std::unique_ptr<BucketIndex const> index, uint32_t epoch)
+{
+    return adoptFileAsBucket<ColdArchiveBucket>(filename, hash, nullptr,
+                                                std::move(index), epoch);
+}
+
+template <typename BucketT>
+std::shared_ptr<BucketT>
 BucketManagerImpl::adoptFileAsBucket(std::string const& filename,
                                      uint256 const& hash, MergeKey* mergeKey,
-                                     std::unique_ptr<BucketIndex const> index)
+                                     std::unique_ptr<BucketIndex const> index,
+                                     std::optional<uint32_t> epoch)
 {
     ZoneScoped;
     releaseAssertOrThrow(mApp.getConfig().MODE_ENABLES_BUCKETLIST);
@@ -492,15 +563,16 @@ BucketManagerImpl::adoptFileAsBucket(std::string const& filename,
         // weak record of the input/output mapping, so we can reconstruct the
         // future if anyone wants to restart the same merge before the bucket
         // expires.
-        CLOG_TRACE(Bucket,
-                   "BucketManager::adoptFileAsBucket switching merge {} from "
-                   "live to finished for output={}",
-                   *mergeKey, hexAbbrev(hash));
+        CLOG_TRACE(
+            Bucket,
+            "BucketManager::adoptFileAsLiveBucket switching merge {} from "
+            "live to finished for output={}",
+            *mergeKey, hexAbbrev(hash));
         mLiveFutures.erase(*mergeKey);
     }
 
     // Check to see if we have an existing bucket (either in-memory or on-disk)
-    std::shared_ptr<Bucket> b = getBucketByHash(hash);
+    std::shared_ptr<BucketT> b = getBucketByHash<BucketT>(hash);
     if (b)
     {
         CLOG_DEBUG(
@@ -518,7 +590,17 @@ BucketManagerImpl::adoptFileAsBucket(std::string const& filename,
     }
     else
     {
-        std::string canonicalName = bucketFilename(hash);
+        std::string canonicalName;
+        if (epoch)
+        {
+            releaseAssert((std::is_same_v<BucketT, ColdArchiveBucket>));
+            canonicalName = bucketFilename(hash, epoch);
+        }
+        else
+        {
+            canonicalName = bucketFilename(hash);
+        }
+
         CLOG_DEBUG(Bucket, "Adopting bucket file {} as {}", filename,
                    canonicalName);
         if (!renameBucketDirFile(filename, canonicalName))
@@ -535,7 +617,7 @@ BucketManagerImpl::adoptFileAsBucket(std::string const& filename,
             }
         }
 
-        b = std::make_shared<Bucket>(canonicalName, hash, std::move(index));
+        b = std::make_shared<BucketT>(canonicalName, hash, std::move(index));
         {
             mSharedBuckets.emplace(hash, b);
             mSharedBucketsSize.set_count(mSharedBuckets.size());
@@ -587,21 +669,86 @@ BucketManagerImpl::getBucketIfExists(uint256 const& hash)
     return nullptr;
 }
 
-std::shared_ptr<Bucket>
+std::shared_ptr<LiveBucket>
+BucketManagerImpl::getLiveBucketByHash(uint256 const& hash)
+{
+    return getBucketByHash<LiveBucket>(hash);
+}
+
+std::shared_ptr<HotArchiveBucket>
+BucketManagerImpl::getHotArchiveBucketByHash(uint256 const& hash)
+{
+    return getBucketByHash<HotArchiveBucket>(hash);
+}
+
+std::shared_ptr<ColdArchiveBucket>
+BucketManagerImpl::getPendingColdArchiveBucketByEpoch(uint32_t epoch)
+{
+    ZoneScoped;
+    std::lock_guard<std::recursive_mutex> lock(mBucketMutex);
+    for (auto f : fs::findfiles(getBucketDir(), isPendingBucketFile))
+    {
+        auto e = extractEpochFromPendingBucketFilename(f);
+        if (e != epoch)
+        {
+            continue;
+        }
+
+        auto hash = extractFromFilename(f);
+        auto i = mSharedBuckets.find(hash);
+        if (i != mSharedBuckets.end())
+        {
+            CLOG_TRACE(Bucket,
+                       "BucketManager::getPendingColdArchiveBucketByEpoch({}) "
+                       "found bucket {}",
+                       epoch, i->second->getFilename());
+
+            // Because BucketManager has an impl class, no public templated
+            // functions can be declared. This means we have to manually
+            // enforce types via `getLiveBucketByHash` and
+            // `getHotArchiveBucketByHash`, leading to this ugly cast.
+            auto ret = std::dynamic_pointer_cast<ColdArchiveBucket>(i->second);
+            releaseAssertOrThrow(ret);
+            return ret;
+        }
+        // Bucket not yet in memory, create one from file
+        else
+        {
+            std::string canonicalName = bucketFilename(hash, epoch);
+            auto p = std::make_shared<ColdArchiveBucket>(canonicalName, hash,
+                                                         /*index=*/nullptr);
+            mSharedBuckets.emplace(hash, p);
+            mSharedBucketsSize.set_count(mSharedBuckets.size());
+            return p;
+        }
+    }
+
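+    // No pending file exists for this epoch: per the interface contract,
+    // return a default-constructed (empty) bucket rather than nullptr.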
+    return std::make_shared<ColdArchiveBucket>();
+}
+
+template <class BucketT>
+std::shared_ptr<BucketT>
 BucketManagerImpl::getBucketByHash(uint256 const& hash)
 {
     ZoneScoped;
     std::lock_guard<std::recursive_mutex> lock(mBucketMutex);
     if (isZero(hash))
     {
-        return std::make_shared<Bucket>();
+        return std::make_shared<BucketT>();
     }
     auto i = mSharedBuckets.find(hash);
     if (i != mSharedBuckets.end())
     {
         CLOG_TRACE(Bucket, "BucketManager::getBucketByHash({}) found bucket {}",
                    binToHex(hash), i->second->getFilename());
-        return i->second;
+
+        // Because BucketManager has an impl class, no public templated
+        // functions can be declared. This means we have to manually enforce
+        // types via `getLiveBucketByHash` and `getHotArchiveBucketByHash`,
+        // leading to this ugly cast.
+        auto ret = std::dynamic_pointer_cast<BucketT>(i->second);
+        releaseAssertOrThrow(ret);
+        return ret;
     }
     std::string canonicalName = bucketFilename(hash);
     if (fs::exists(canonicalName))
@@ -612,15 +759,28 @@ BucketManagerImpl::getBucketByHash(uint256 const& hash)
                    binToHex(hash));
 
         auto p =
-            std::make_shared<Bucket>(canonicalName, hash, /*index=*/nullptr);
+            std::make_shared<BucketT>(canonicalName, hash, /*index=*/nullptr);
         mSharedBuckets.emplace(hash, p);
         mSharedBucketsSize.set_count(mSharedBuckets.size());
         return p;
     }
-    return std::shared_ptr<Bucket>();
+    return std::shared_ptr<BucketT>();
+}
+
+std::shared_future<std::shared_ptr<LiveBucket>>
+BucketManagerImpl::getLiveMergeFuture(MergeKey const& key)
+{
+    return getMergeFuture<LiveBucket>(key);
+}
+
+std::shared_future<std::shared_ptr<HotArchiveBucket>>
+BucketManagerImpl::getHotArchiveMergeFuture(MergeKey const& key)
+{
+    return getMergeFuture<HotArchiveBucket>(key);
 }
 
-std::shared_future<std::shared_ptr<Bucket>>
+template <class BucketT>
+std::shared_future<std::shared_ptr<BucketT>>
 BucketManagerImpl::getMergeFuture(MergeKey const& key)
 {
     ZoneScoped;
@@ -634,14 +794,14 @@ BucketManagerImpl::getMergeFuture(MergeKey const& key)
         Hash bucketHash;
         if (mFinishedMerges.findMergeFor(key, bucketHash))
         {
-            auto bucket = getBucketByHash(bucketHash);
+            auto bucket = getBucketByHash<BucketT>(bucketHash);
             if (bucket)
             {
                 CLOG_TRACE(Bucket,
                            "BucketManager::getMergeFuture returning new future "
                            "for finished merge {} with output={}",
                            key, hexAbbrev(bucketHash));
-                std::promise<std::shared_ptr<Bucket>> promise;
+                std::promise<std::shared_ptr<BucketT>> promise;
                 auto future = promise.get_future().share();
                 promise.set_value(bucket);
                 mc.mFinishedMergeReattachments++;
@@ -653,7 +813,7 @@ BucketManagerImpl::getMergeFuture(MergeKey const& key)
             Bucket,
             "BucketManager::getMergeFuture returning empty future for merge {}",
             key);
-        return std::shared_future<std::shared_ptr<Bucket>>();
+        return std::shared_future<std::shared_ptr<BucketT>>();
     }
     CLOG_TRACE(
         Bucket,
@@ -661,12 +821,32 @@ BucketManagerImpl::getMergeFuture(MergeKey const& key)
         key);
     mc.mRunningMergeReattachments++;
     incrMergeCounters(mc);
-    return i->second;
+
+    // Because BucketManager has an impl class, no public templated functions
+    // can be declared. This means we have to manually enforce types via the
+    // typed wrapper methods, leading to this ugly std::variant get that
+    // throws if the requested future type is not the one stored.
+    return std::get<std::shared_future<std::shared_ptr<BucketT>>>(i->second);
+}
+
+void
+BucketManagerImpl::putLiveMergeFuture(
+    MergeKey const& key, std::shared_future<std::shared_ptr<LiveBucket>> wp)
+{
+    putMergeFuture<LiveBucket>(key, wp);
 }
 
+void
+BucketManagerImpl::putHotArchiveMergeFuture(
+    MergeKey const& key,
+    std::shared_future<std::shared_ptr<HotArchiveBucket>> wp)
+{
+    putMergeFuture<HotArchiveBucket>(key, wp);
+}
+
+template <class BucketT>
 void
 BucketManagerImpl::putMergeFuture(
-    MergeKey const& key, std::shared_future<std::shared_ptr<Bucket>> wp)
+    MergeKey const& key, std::shared_future<std::shared_ptr<BucketT>> wp)
 {
     ZoneScoped;
     releaseAssertOrThrow(mApp.getConfig().MODE_ENABLES_BUCKETLIST);
@@ -697,31 +877,37 @@ BucketManagerImpl::getBucketListReferencedBuckets() const
         return referenced;
     }
 
-    // retain current bucket list
-    for (uint32_t i = 0; i < BucketList::kNumLevels; ++i)
-    {
-        auto const& level = mBucketList->getLevel(i);
-        auto rit = referenced.emplace(level.getCurr()->getHash());
-        if (rit.second)
-        {
-            CLOG_TRACE(Bucket, "{} referenced by bucket list",
-                       binToHex(*rit.first));
-        }
-        rit = referenced.emplace(level.getSnap()->getHash());
-        if (rit.second)
-        {
-            CLOG_TRACE(Bucket, "{} referenced by bucket list",
-                       binToHex(*rit.first));
-        }
-        for (auto const& h : level.getNext().getHashes())
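+    // Common logic for both bucket lists: this generic lambda is invoked with
+    // the live BucketList and then the hot archive BucketList.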
+    auto processBucketList = [&](auto const& bl, uint32_t levels) {
+        // retain current bucket list
+        for (uint32_t i = 0; i < levels; ++i)
         {
-            rit = referenced.emplace(hexToBin256(h));
+            auto const& level = bl->getLevel(i);
+            auto rit = referenced.emplace(level.getCurr()->getHash());
             if (rit.second)
             {
-                CLOG_TRACE(Bucket, "{} referenced by bucket list", h);
+                CLOG_TRACE(Bucket, "{} referenced by bucket list",
+                           binToHex(*rit.first));
+            }
+            rit = referenced.emplace(level.getSnap()->getHash());
+            if (rit.second)
+            {
+                CLOG_TRACE(Bucket, "{} referenced by bucket list",
+                           binToHex(*rit.first));
+            }
+            for (auto const& h : level.getNext().getHashes())
+            {
+                rit = referenced.emplace(hexToBin256(h));
+                if (rit.second)
+                {
+                    CLOG_TRACE(Bucket, "{} referenced by bucket list", h);
+                }
             }
         }
-    }
+    };
+
+    processBucketList(mLiveBucketList, LiveBucketList::kNumLevels);
+    processBucketList(mHotArchiveBucketList,
+                      BucketListBase<HotArchiveBucket>::kNumLevels);
 
     return referenced;
 }
@@ -805,6 +991,23 @@ BucketManagerImpl::cleanupStaleFiles()
             std::remove(indexFilename.c_str());
         }
     }
+
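+    // Also GC unreferenced pending bucket files and their indexes.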
+    for (auto f : fs::findfiles(getBucketDir(), isPendingBucketFile))
+    {
+        auto hash = extractFromFilename(f);
+        if (referenced.find(hash) == std::end(referenced))
+        {
+            // we don't care about failure here:
+            // if removing the file fails this time, it may succeed when this
+            // is called again
+            auto fullName = getBucketDir() + "/" + f;
+            std::remove(fullName.c_str());
+
+            // GC index as well
+            auto indexFilename = bucketIndexFilename(hash);
+            std::remove(indexFilename.c_str());
+        }
+    }
 }
 
 void
@@ -876,7 +1079,7 @@ BucketManagerImpl::forgetUnreferencedBuckets()
                 // There should be no futures alive with this output: we
                 // switched to storing only weak input/output mappings
                 // when any merge producing the bucket completed (in
-                // adoptFileAsBucket), and we believe there's only one
+                // adoptFileAsLiveBucket), and we believe there's only one
                 // reference to the bucket anyways -- our own in
                 // mSharedBuckets. But there might be a race we missed,
                 // so double check & mop up here. Worst case we prevent
@@ -901,10 +1104,10 @@ BucketManagerImpl::forgetUnreferencedBuckets()
 }
 
 void
-BucketManagerImpl::addBatch(Application& app, LedgerHeader header,
-                            std::vector<LedgerEntry> const& initEntries,
-                            std::vector<LedgerEntry> const& liveEntries,
-                            std::vector<LedgerKey> const& deadEntries)
+BucketManagerImpl::addLiveBatch(Application& app, LedgerHeader header,
+                                std::vector<LedgerEntry> const& initEntries,
+                                std::vector<LedgerEntry> const& liveEntries,
+                                std::vector<LedgerKey> const& deadEntries)
 {
     ZoneScoped;
     releaseAssertOrThrow(app.getConfig().MODE_ENABLES_BUCKETLIST);
@@ -914,17 +1117,44 @@ BucketManagerImpl::addBatch(Application& app, LedgerHeader header,
         header.ledgerVersion = mFakeTestProtocolVersion;
     }
 #endif
-    auto timer = mBucketAddBatch.TimeScope();
-    mBucketObjectInsertBatch.Mark(initEntries.size() + liveEntries.size() +
-                                  deadEntries.size());
-    mBucketList->addBatch(app, header.ledgerSeq, header.ledgerVersion,
-                          initEntries, liveEntries, deadEntries);
-    mBucketListSizeCounter.set_count(mBucketList->getSize());
-
-    if (app.getConfig().isUsingBucketListDB())
+    auto timer = mBucketAddLiveBatch.TimeScope();
+    mBucketLiveObjectInsertBatch.Mark(initEntries.size() + liveEntries.size() +
+                                      deadEntries.size());
+    mLiveBucketList->addBatch(app, header.ledgerSeq, header.ledgerVersion,
+                              initEntries, liveEntries, deadEntries);
+    mLiveBucketListSizeCounter.set_count(mLiveBucketList->getSize());
+    reportBucketEntryCountMetrics();
+}
+
+void
+BucketManagerImpl::addHotArchiveBatch(
+    Application& app, LedgerHeader header,
+    std::vector<LedgerEntry> const& archivedEntries,
+    std::vector<LedgerKey> const& restoredEntries,
+    std::vector<LedgerKey> const& deletedEntries)
+{
+    ZoneScoped;
+    releaseAssertOrThrow(app.getConfig().MODE_ENABLES_BUCKETLIST);
+    releaseAssertOrThrow(protocolVersionStartsFrom(
+        header.ledgerVersion,
+        Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION));
+#ifdef BUILD_TESTS
+    if (mUseFakeTestValuesForNextClose)
     {
-        reportBucketEntryCountMetrics();
+        header.ledgerVersion = mFakeTestProtocolVersion;
     }
+#endif
+    auto timer = mBucketAddArchiveBatch.TimeScope();
+    mBucketArchiveObjectInsertBatch.Mark(archivedEntries.size() +
+                                         restoredEntries.size() +
+                                         deletedEntries.size());
+
+    // Hot archive should never modify an existing entry, so there are never
+    // live entries
+    mHotArchiveBucketList->addBatch(app, header.ledgerSeq, header.ledgerVersion,
+                                    archivedEntries, restoredEntries,
+                                    deletedEntries);
+    mArchiveBucketListSizeCounter.set_count(mHotArchiveBucketList->getSize());
 }
 
 #ifdef BUILD_TESTS
@@ -945,6 +1175,11 @@ BucketManagerImpl::getBucketHashesInBucketDirForTesting() const
     {
         hashes.emplace(extractFromFilename(f));
     }
+
+    for (auto f : fs::findfiles(getBucketDir(), isPendingBucketFile))
+    {
+        hashes.emplace(extractFromFilename(f));
+    }
     return hashes;
 }
 
@@ -964,7 +1199,19 @@ BucketManagerImpl::snapshotLedger(LedgerHeader& currentHeader)
     Hash hash;
     if (mApp.getConfig().MODE_ENABLES_BUCKETLIST)
     {
-        hash = mBucketList->getHash();
+        if (protocolVersionStartsFrom(
+                currentHeader.ledgerVersion,
+                Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION))
+        {
+            // TODO: Hash the Hot Archive BucketList as well
+            // Dependency: HAS supports Hot Archive BucketList
+
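+            // Until then, only the live BucketList hash is used, matching the
+            // legacy branch below.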
+            hash = mLiveBucketList->getHash();
+        }
+        else
+        {
+            hash = mLiveBucketList->getHash();
+        }
     }
 
     currentHeader.bucketListHash = hash;
@@ -991,26 +1238,15 @@ BucketManagerImpl::maybeSetIndex(std::shared_ptr<Bucket> b,
     }
 }
 
-void
-BucketManagerImpl::scanForEvictionLegacy(AbstractLedgerTxn& ltx,
-                                         uint32_t ledgerSeq)
-{
-    ZoneScoped;
-    releaseAssert(protocolVersionStartsFrom(ltx.getHeader().ledgerVersion,
-                                            SOROBAN_PROTOCOL_VERSION));
-    mBucketList->scanForEvictionLegacy(
-        mApp, ltx, ledgerSeq, mBucketListEvictionCounters, mEvictionStatistics);
-}
-
 void
 BucketManagerImpl::startBackgroundEvictionScan(uint32_t ledgerSeq)
 {
-    releaseAssert(mApp.getConfig().isUsingBucketListDB());
     releaseAssert(mSnapshotManager);
     releaseAssert(!mEvictionFuture.valid());
     releaseAssert(mEvictionStatistics);
 
-    auto searchableBL = mSnapshotManager->copySearchableBucketListSnapshot();
+    auto searchableBL =
+        mSnapshotManager->copySearchableLiveBucketListSnapshot();
     auto const& cfg = mApp.getLedgerManager().getSorobanNetworkConfig();
     auto const& sas = cfg.stateArchivalSettings();
 
@@ -1027,7 +1263,7 @@ BucketManagerImpl::startBackgroundEvictionScan(uint32_t ledgerSeq)
     mEvictionFuture = task->get_future();
     mApp.postOnEvictionBackgroundThread(
         bind(&task_t::operator(), task),
-        "SearchableBucketListSnapshot: eviction scan");
+        "SearchableLiveBucketListSnapshot: eviction scan");
 }
 
 void
@@ -1166,49 +1402,51 @@ BucketManagerImpl::assumeState(HistoryArchiveState const& has,
     ZoneScoped;
     releaseAssertOrThrow(mApp.getConfig().MODE_ENABLES_BUCKETLIST);
 
-    for (uint32_t i = 0; i < BucketList::kNumLevels; ++i)
+    // TODO: Assume archival bucket state
+    // Dependency: HAS supports Hot Archive BucketList
+    for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i)
     {
-        auto curr = getBucketByHash(hexToBin256(has.currentBuckets.at(i).curr));
-        auto snap = getBucketByHash(hexToBin256(has.currentBuckets.at(i).snap));
+        auto curr =
+            getLiveBucketByHash(hexToBin256(has.currentBuckets.at(i).curr));
+        auto snap =
+            getLiveBucketByHash(hexToBin256(has.currentBuckets.at(i).snap));
         if (!(curr && snap))
         {
             throw std::runtime_error("Missing bucket files while assuming "
-                                     "saved BucketList state");
+                                     "saved live BucketList state");
         }
 
         auto const& nextFuture = has.currentBuckets.at(i).next;
-        std::shared_ptr<Bucket> nextBucket = nullptr;
+        std::shared_ptr<LiveBucket> nextBucket = nullptr;
         if (nextFuture.hasOutputHash())
         {
             nextBucket =
-                getBucketByHash(hexToBin256(nextFuture.getOutputHash()));
+                getLiveBucketByHash(hexToBin256(nextFuture.getOutputHash()));
             if (!nextBucket)
             {
-                throw std::runtime_error("Missing future bucket files while "
-                                         "assuming saved BucketList state");
+                throw std::runtime_error(
+                    "Missing future bucket files while "
+                    "assuming saved live BucketList state");
             }
         }
 
-        // Buckets on the BucketList should always be indexed when
-        // BucketListDB enabled
-        if (mApp.getConfig().isUsingBucketListDB())
+        // Buckets on the BucketList should always be indexed
+        releaseAssert(curr->isEmpty() || curr->isIndexed());
+        releaseAssert(snap->isEmpty() || snap->isIndexed());
+        if (nextBucket)
         {
-            releaseAssert(curr->isEmpty() || curr->isIndexed());
-            releaseAssert(snap->isEmpty() || snap->isIndexed());
-            if (nextBucket)
-            {
-                releaseAssert(nextBucket->isEmpty() || nextBucket->isIndexed());
-            }
+            releaseAssert(nextBucket->isEmpty() || nextBucket->isIndexed());
         }
 
-        mBucketList->getLevel(i).setCurr(curr);
-        mBucketList->getLevel(i).setSnap(snap);
-        mBucketList->getLevel(i).setNext(nextFuture);
+        mLiveBucketList->getLevel(i).setCurr(curr);
+        mLiveBucketList->getLevel(i).setSnap(snap);
+        mLiveBucketList->getLevel(i).setNext(nextFuture);
     }
 
     if (restartMerges)
     {
-        mBucketList->restartMerges(mApp, maxProtocolVersion, has.currentLedger);
+        mLiveBucketList->restartMerges(mApp, maxProtocolVersion,
+                                       has.currentLedger);
     }
     cleanupStaleFiles();
 }
@@ -1229,14 +1467,14 @@ BucketManagerImpl::isShutdown() const
 // inserting live or init entries. Should be called in a loop over a BL, from
 // old to new.
 static void
-loadEntriesFromBucket(std::shared_ptr<Bucket> b, std::string const& name,
+loadEntriesFromBucket(std::shared_ptr<LiveBucket> b, std::string const& name,
                       std::map<LedgerKey, LedgerEntry>& map)
 {
     ZoneScoped;
 
     using namespace std::chrono;
     medida::Timer timer;
-    BucketInputIterator in(b);
+    LiveBucketInputIterator in(b);
     timer.Time([&]() {
         while (in)
         {
@@ -1282,7 +1520,7 @@ BucketManagerImpl::loadCompleteLedgerState(HistoryArchiveState const& has)
 
     std::map<LedgerKey, LedgerEntry> ledgerMap;
     std::vector<std::pair<Hash, std::string>> hashes;
-    for (uint32_t i = BucketList::kNumLevels; i > 0; --i)
+    for (uint32_t i = LiveBucketList::kNumLevels; i > 0; --i)
     {
         HistoryStateBucket const& hsb = has.currentBuckets.at(i - 1);
         hashes.emplace_back(hexToBin256(hsb.snap),
@@ -1296,7 +1534,7 @@ BucketManagerImpl::loadCompleteLedgerState(HistoryArchiveState const& has)
         {
             continue;
         }
-        auto b = getBucketByHash(pair.first);
+        auto b = getLiveBucketByHash(pair.first);
         if (!b)
         {
             throw std::runtime_error(std::string("missing bucket: ") +
@@ -1307,7 +1545,7 @@ BucketManagerImpl::loadCompleteLedgerState(HistoryArchiveState const& has)
     return ledgerMap;
 }
 
-std::shared_ptr<Bucket>
+std::shared_ptr<LiveBucket>
 BucketManagerImpl::mergeBuckets(HistoryArchiveState const& has)
 {
     ZoneScoped;
@@ -1317,8 +1555,8 @@ BucketManagerImpl::mergeBuckets(HistoryArchiveState const& has)
     MergeCounters mc;
     auto& ctx = mApp.getClock().getIOContext();
     meta.ledgerVersion = mApp.getConfig().LEDGER_PROTOCOL_VERSION;
-    BucketOutputIterator out(getTmpDir(), /*keepDeadEntries=*/false, meta, mc,
-                             ctx, /*doFsync=*/true);
+    LiveBucketOutputIterator out(getTmpDir(), /*keepTombstoneEntries=*/false,
+                                 meta, mc, ctx, /*doFsync=*/true);
     for (auto const& pair : ledgerMap)
     {
         BucketEntry be;
@@ -1326,12 +1564,12 @@ BucketManagerImpl::mergeBuckets(HistoryArchiveState const& has)
         be.liveEntry() = pair.second;
         out.put(be);
     }
-    return out.getBucket(*this, /*shouldSynchronouslyIndex=*/false);
+    return out.getBucket(*this);
 }
 
 static bool
 visitLiveEntriesInBucket(
-    std::shared_ptr<Bucket const> b, std::string const& name,
+    std::shared_ptr<LiveBucket const> b, std::string const& name,
     std::optional<int64_t> minLedger,
     std::function<bool(LedgerEntry const&)> const& filterEntry,
     std::function<bool(LedgerEntry const&)> const& acceptEntry,
@@ -1344,7 +1582,7 @@ visitLiveEntriesInBucket(
 
     bool stopIteration = false;
     timer.Time([&]() {
-        for (BucketInputIterator in(b); in; ++in)
+        for (LiveBucketInputIterator in(b); in; ++in)
         {
             BucketEntry const& e = *in;
             if (e.type() == LIVEENTRY || e.type() == INITENTRY)
@@ -1395,7 +1633,7 @@ visitLiveEntriesInBucket(
 
 static bool
 visitAllEntriesInBucket(
-    std::shared_ptr<Bucket const> b, std::string const& name,
+    std::shared_ptr<LiveBucket const> b, std::string const& name,
     std::optional<int64_t> minLedger,
     std::function<bool(LedgerEntry const&)> const& filterEntry,
     std::function<bool(LedgerEntry const&)> const& acceptEntry)
@@ -1407,7 +1645,7 @@ visitAllEntriesInBucket(
 
     bool stopIteration = false;
     timer.Time([&]() {
-        for (BucketInputIterator in(b); in; ++in)
+        for (LiveBucketInputIterator in(b); in; ++in)
         {
             BucketEntry const& e = *in;
             if (e.type() == LIVEENTRY || e.type() == INITENTRY)
@@ -1459,7 +1697,7 @@ BucketManagerImpl::visitLedgerEntries(
 
     UnorderedSet<Hash> deletedEntries;
     std::vector<std::pair<Hash, std::string>> hashes;
-    for (uint32_t i = 0; i < BucketList::kNumLevels; ++i)
+    for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i)
     {
         HistoryStateBucket const& hsb = has.currentBuckets.at(i);
         hashes.emplace_back(hexToBin256(hsb.curr),
@@ -1475,7 +1713,7 @@ BucketManagerImpl::visitLedgerEntries(
             {
                 continue;
             }
-            auto b = getBucketByHash(pair.first);
+            auto b = getLiveBucketByHash(pair.first);
             if (!b)
             {
                 throw std::runtime_error(std::string("missing bucket: ") +
@@ -1511,7 +1749,10 @@ BucketManagerImpl::scheduleVerifyReferencedBucketsWork()
         {
             continue;
         }
-        auto b = getBucketByHash(h);
+
+        // TODO: Update verify to work for ArchiveBucket
+        // Dependency: HAS supports Hot Archive BucketList
+        auto b = getBucketByHash<LiveBucket>(h);
         if (!b)
         {
             throw std::runtime_error(fmt::format(
@@ -1530,16 +1771,15 @@ BucketManagerImpl::getConfig() const
     return mApp.getConfig();
 }
 
-std::shared_ptr<SearchableBucketListSnapshot>
-BucketManagerImpl::getSearchableBucketListSnapshot()
+std::shared_ptr<SearchableLiveBucketListSnapshot>
+BucketManagerImpl::getSearchableLiveBucketListSnapshot()
 {
-    releaseAssert(mApp.getConfig().isUsingBucketListDB());
     // Any other threads must maintain their own snapshot
     releaseAssert(threadIsMain());
     if (!mSearchableBucketListSnapshot)
     {
         mSearchableBucketListSnapshot =
-            mSnapshotManager->copySearchableBucketListSnapshot();
+            mSnapshotManager->copySearchableLiveBucketListSnapshot();
     }
 
     return mSearchableBucketListSnapshot;
@@ -1548,11 +1788,7 @@ BucketManagerImpl::getSearchableBucketListSnapshot()
 void
 BucketManagerImpl::reportBucketEntryCountMetrics()
 {
-    if (!mApp.getConfig().isUsingBucketListDB())
-    {
-        return;
-    }
-    auto bucketEntryCounters = mBucketList->sumBucketEntryCounters();
+    auto bucketEntryCounters = mLiveBucketList->sumBucketEntryCounters();
     for (auto [type, count] : bucketEntryCounters.entryTypeCounts)
     {
         auto countCounter = mBucketListEntryCountCounters.find(type);
diff --git a/src/bucket/BucketManagerImpl.h b/src/bucket/BucketManagerImpl.h
index 50b6479ede..191a2995c6 100644
--- a/src/bucket/BucketManagerImpl.h
+++ b/src/bucket/BucketManagerImpl.h
@@ -29,7 +29,7 @@ class TmpDir;
 class AbstractLedgerTxn;
 class Application;
 class Bucket;
-class BucketList;
+class LiveBucketList;
 class BucketSnapshotManager;
 struct BucketEntryCounters;
 enum class LedgerEntryTypeAndDurability : uint32_t;
@@ -41,26 +41,30 @@ class BucketManagerImpl : public BucketManager
     static std::string const kLockFilename;
 
     Application& mApp;
-    std::unique_ptr<BucketList> mBucketList;
+    std::unique_ptr<LiveBucketList> mLiveBucketList;
+    std::unique_ptr<HotArchiveBucketList> mHotArchiveBucketList;
     std::unique_ptr<BucketSnapshotManager> mSnapshotManager;
     std::unique_ptr<TmpDirManager> mTmpDirManager;
     std::unique_ptr<TmpDir> mWorkDir;
     std::map<Hash, std::shared_ptr<Bucket>> mSharedBuckets;
-    std::shared_ptr<SearchableBucketListSnapshot>
+    std::shared_ptr<SearchableLiveBucketListSnapshot>
         mSearchableBucketListSnapshot{};
 
     // Lock for managing raw Bucket files or the bucket directory. This lock is
     // only required for file access, but is not required for logical changes to
-    // the BucketList (i.e. addBatch).
+    // a BucketList (i.e. addLiveBatch).
     mutable std::recursive_mutex mBucketMutex;
     std::unique_ptr<std::string> mLockedBucketDir;
-    medida::Meter& mBucketObjectInsertBatch;
-    medida::Timer& mBucketAddBatch;
+    medida::Meter& mBucketLiveObjectInsertBatch;
+    medida::Meter& mBucketArchiveObjectInsertBatch;
+    medida::Timer& mBucketAddLiveBatch;
+    medida::Timer& mBucketAddArchiveBatch;
     medida::Timer& mBucketSnapMerge;
     medida::Counter& mSharedBucketsSize;
     medida::Meter& mBucketListDBBloomMisses;
     medida::Meter& mBucketListDBBloomLookups;
-    medida::Counter& mBucketListSizeCounter;
+    medida::Counter& mLiveBucketListSizeCounter;
+    medida::Counter& mArchiveBucketListSizeCounter;
     EvictionCounters mBucketListEvictionCounters;
     MergeCounters mMergeCounters;
     std::shared_ptr<EvictionStatistics> mEvictionStatistics{};
@@ -71,15 +75,19 @@ class BucketManagerImpl : public BucketManager
 
     std::future<EvictionResult> mEvictionFuture{};
 
-    bool const mDeleteEntireBucketDirInDtor;
-
     // Records bucket-merges that are currently _live_ in some FutureBucket, in
     // the sense of either running, or finished (with or without the
     // FutureBucket being resolved). Entries in this map will be cleared when
     // the FutureBucket is _cleared_ (typically when the owning BucketList level
     // is committed).
-    UnorderedMap<MergeKey, std::shared_future<std::shared_ptr<Bucket>>>
-        mLiveFutures;
+
+    using LiveBucketFutureT = std::shared_future<std::shared_ptr<LiveBucket>>;
+    using HotArchiveBucketFutureT =
+        std::shared_future<std::shared_ptr<HotArchiveBucket>>;
+    using BucketFutureT =
+        std::variant<LiveBucketFutureT, HotArchiveBucketFutureT>;
+
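+    // Live and hot archive merges share this map; getMergeFuture<BucketT>
+    // extracts the matching alternative and throws on a type mismatch.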
+    UnorderedMap<MergeKey, BucketFutureT> mLiveFutures;
 
     // Records bucket-merges that are _finished_, i.e. have been adopted as
     // (possibly redundant) bucket files. This is a "weak" (bi-multi-)map of
@@ -98,6 +106,24 @@ class BucketManagerImpl : public BucketManager
                                          size_t numEntries) const;
     medida::Timer& getPointLoadTimer(LedgerEntryType t) const;
 
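+    // Private templated implementations behind the typed public accessors
+    // (adoptFileAs*Bucket, get*BucketByHash, get/put*MergeFuture); the
+    // public interface is virtual, so it cannot be templated.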
+    template <class BucketT>
+    std::shared_ptr<BucketT>
+    adoptFileAsBucket(std::string const& filename, uint256 const& hash,
+                      MergeKey* mergeKey,
+                      std::unique_ptr<BucketIndex const> index,
+                      std::optional<uint32_t> epoch = std::nullopt);
+
+    template <class BucketT>
+    std::shared_ptr<BucketT> getBucketByHash(uint256 const& hash);
+
+    template <class BucketT>
+    std::shared_future<std::shared_ptr<BucketT>>
+    getMergeFuture(MergeKey const& key);
+
+    template <class BucketT>
+    void putMergeFuture(MergeKey const& key,
+                        std::shared_future<std::shared_ptr<BucketT>>);
+
 #ifdef BUILD_TESTS
     bool mUseFakeTestValuesForNextClose{false};
     uint32_t mFakeTestProtocolVersion;
@@ -106,8 +132,10 @@ class BucketManagerImpl : public BucketManager
 
   protected:
     void calculateSkipValues(LedgerHeader& currentHeader);
-    std::string bucketFilename(std::string const& bucketHexHash);
-    std::string bucketFilename(Hash const& hash);
+    std::string bucketFilename(std::string const& bucketHexHash,
+                               std::optional<uint32_t> epoch = std::nullopt);
+    std::string bucketFilename(Hash const& hash,
+                               std::optional<uint32_t> epoch = std::nullopt);
 
   public:
     BucketManagerImpl(Application& app);
@@ -117,7 +145,8 @@ class BucketManagerImpl : public BucketManager
     std::string bucketIndexFilename(Hash const& hash) const override;
     std::string const& getTmpDir() override;
     std::string const& getBucketDir() const override;
-    BucketList& getBucketList() override;
+    LiveBucketList& getLiveBucketList() override;
+    HotArchiveBucketList& getHotArchiveBucketList() override;
     BucketSnapshotManager& getBucketSnapshotManager() const override;
     medida::Timer& getMergeTimer() override;
     MergeCounters readMergeCounters() override;
@@ -125,32 +154,52 @@ class BucketManagerImpl : public BucketManager
     TmpDirManager& getTmpDirManager() override;
     bool renameBucketDirFile(std::filesystem::path const& src,
                              std::filesystem::path const& dst) override;
-    std::shared_ptr<Bucket>
-    adoptFileAsBucket(std::string const& filename, uint256 const& hash,
-                      MergeKey* mergeKey,
-                      std::unique_ptr<BucketIndex const> index) override;
+    std::shared_ptr<LiveBucket>
+    adoptFileAsLiveBucket(std::string const& filename, uint256 const& hash,
+                          MergeKey* mergeKey,
+                          std::unique_ptr<BucketIndex const> index) override;
+    std::shared_ptr<HotArchiveBucket> adoptFileAsHotArchiveBucket(
+        std::string const& filename, uint256 const& hash, MergeKey* mergeKey,
+        std::unique_ptr<BucketIndex const> index) override;
+    std::shared_ptr<ColdArchiveBucket> adoptFileAsPendingColdArchiveBucket(
+        std::string const& filename, uint256 const& hash,
+        std::unique_ptr<BucketIndex const> index, uint32_t epoch) override;
     void noteEmptyMergeOutput(MergeKey const& mergeKey) override;
     std::shared_ptr<Bucket> getBucketIfExists(uint256 const& hash) override;
-    std::shared_ptr<Bucket> getBucketByHash(uint256 const& hash) override;
-
-    std::shared_future<std::shared_ptr<Bucket>>
-    getMergeFuture(MergeKey const& key) override;
-    void putMergeFuture(MergeKey const& key,
-                        std::shared_future<std::shared_ptr<Bucket>>) override;
+    std::shared_ptr<LiveBucket>
+    getLiveBucketByHash(uint256 const& hash) override;
+    std::shared_ptr<HotArchiveBucket>
+    getHotArchiveBucketByHash(uint256 const& hash) override;
+    std::shared_ptr<ColdArchiveBucket>
+    getPendingColdArchiveBucketByEpoch(uint32_t epoch) override;
+
+    std::shared_future<std::shared_ptr<LiveBucket>>
+    getLiveMergeFuture(MergeKey const& key) override;
+    std::shared_future<std::shared_ptr<HotArchiveBucket>>
+    getHotArchiveMergeFuture(MergeKey const& key) override;
+    void putLiveMergeFuture(
+        MergeKey const& key,
+        std::shared_future<std::shared_ptr<LiveBucket>>) override;
+    void putHotArchiveMergeFuture(
+        MergeKey const& key,
+        std::shared_future<std::shared_ptr<HotArchiveBucket>>) override;
 #ifdef BUILD_TESTS
     void clearMergeFuturesForTesting() override;
 #endif
 
     void forgetUnreferencedBuckets() override;
-    void addBatch(Application& app, LedgerHeader header,
-                  std::vector<LedgerEntry> const& initEntries,
-                  std::vector<LedgerEntry> const& liveEntries,
-                  std::vector<LedgerKey> const& deadEntries) override;
+    void addLiveBatch(Application& app, LedgerHeader header,
+                      std::vector<LedgerEntry> const& initEntries,
+                      std::vector<LedgerEntry> const& liveEntries,
+                      std::vector<LedgerKey> const& deadEntries) override;
+    void
+    addHotArchiveBatch(Application& app, LedgerHeader header,
+                       std::vector<LedgerEntry> const& archivedEntries,
+                       std::vector<LedgerKey> const& restoredEntries,
+                       std::vector<LedgerKey> const& deletedEntries) override;
     void snapshotLedger(LedgerHeader& currentHeader) override;
     void maybeSetIndex(std::shared_ptr<Bucket> b,
                        std::unique_ptr<BucketIndex const>&& index) override;
-    void scanForEvictionLegacy(AbstractLedgerTxn& ltx,
-                               uint32_t ledgerSeq) override;
     void startBackgroundEvictionScan(uint32_t ledgerSeq) override;
     void
     resolveBackgroundEvictionScan(AbstractLedgerTxn& ltx, uint32_t ledgerSeq,
@@ -161,7 +210,7 @@ class BucketManagerImpl : public BucketManager
 
 #ifdef BUILD_TESTS
     // Install a fake/assumed ledger version and bucket list hash to use in next
-    // call to addBatch and snapshotLedger. This interface exists only for
+    // call to addLiveBatch and snapshotLedger. This interface exists only for
     // testing in a specific type of history replay.
     void setNextCloseVersionAndHashForTesting(uint32_t protocolVers,
                                               uint256 const& hash) override;
@@ -184,7 +233,7 @@ class BucketManagerImpl : public BucketManager
     std::map<LedgerKey, LedgerEntry>
     loadCompleteLedgerState(HistoryArchiveState const& has) override;
 
-    std::shared_ptr<Bucket>
+    std::shared_ptr<LiveBucket>
     mergeBuckets(HistoryArchiveState const& has) override;
 
     void visitLedgerEntries(
@@ -197,8 +246,9 @@ class BucketManagerImpl : public BucketManager
 
     Config const& getConfig() const override;
 
-    std::shared_ptr<SearchableBucketListSnapshot>
-    getSearchableBucketListSnapshot() override;
+    std::shared_ptr<SearchableLiveBucketListSnapshot>
+    getSearchableLiveBucketListSnapshot() override;
+
     void reportBucketEntryCountMetrics() override;
 };
 
diff --git a/src/bucket/BucketOutputIterator.cpp b/src/bucket/BucketOutputIterator.cpp
index 412cfad724..4bff33530a 100644
--- a/src/bucket/BucketOutputIterator.cpp
+++ b/src/bucket/BucketOutputIterator.cpp
@@ -6,9 +6,13 @@
 #include "bucket/Bucket.h"
 #include "bucket/BucketIndex.h"
 #include "bucket/BucketManager.h"
+#include "ledger/LedgerTypeUtils.h"
 #include "util/GlobalChecks.h"
+#include "util/ProtocolVersion.h"
+#include "xdr/Stellar-ledger.h"
 #include <Tracy.hpp>
 #include <filesystem>
+#include <optional>
 
 namespace stellar
 {
@@ -17,15 +21,16 @@ namespace stellar
  * Helper class that points to an output tempfile. Absorbs BucketEntries and
  * hashes them while writing to either destination. Produces a Bucket when done.
  */
-BucketOutputIterator::BucketOutputIterator(std::string const& tmpDir,
-                                           bool keepDeadEntries,
-                                           BucketMetadata const& meta,
-                                           MergeCounters& mc,
-                                           asio::io_context& ctx, bool doFsync)
+template <typename BucketT>
+BucketOutputIterator<BucketT>::BucketOutputIterator(
+    std::string const& tmpDir, bool keepTombstoneEntries,
+    BucketMetadata const& meta, MergeCounters& mc, asio::io_context& ctx,
+    bool doFsync, std::optional<uint32_t> epoch)
     : mFilename(Bucket::randomBucketName(tmpDir))
     , mOut(ctx, doFsync)
     , mBuf(nullptr)
-    , mKeepDeadEntries(keepDeadEntries)
+    , mEpoch(epoch)
+    , mKeepTombstoneEntries(keepTombstoneEntries)
     , mMeta(meta)
     , mMergeCounters(mc)
 {
@@ -37,34 +42,131 @@ BucketOutputIterator::BucketOutputIterator(std::string const& tmpDir,
 
     if (protocolVersionStartsFrom(
             meta.ledgerVersion,
-            Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY))
+            LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY))
     {
-        BucketEntry bme;
-        bme.type(METAENTRY);
-        bme.metaEntry() = mMeta;
-        put(bme);
+
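+        // Each bucket type carries its own METAENTRY representation; write
+        // the variant matching BucketT.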
+        if constexpr (std::is_same_v<BucketT, LiveBucket>)
+        {
+            BucketEntry bme;
+            bme.type(METAENTRY);
+            bme.metaEntry() = mMeta;
+            put(bme);
+        }
+        else if constexpr (std::is_same_v<BucketT, HotArchiveBucket>)
+        {
+            releaseAssertOrThrow(protocolVersionStartsFrom(
+                meta.ledgerVersion,
+                Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION));
+
+            HotArchiveBucketEntry bme;
+            bme.type(HOT_ARCHIVE_METAENTRY);
+            bme.metaEntry() = mMeta;
+            put(bme);
+        }
+        else
+        {
+            releaseAssertOrThrow(protocolVersionStartsFrom(
+                meta.ledgerVersion,
+                Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION));
+
+            ColdArchiveBucketEntry bme;
+            bme.type(COLD_ARCHIVE_METAENTRY);
+            bme.metaEntry() = mMeta;
+            put(bme);
+        }
+
         mPutMeta = true;
     }
 }
 
+template <typename BucketT>
 void
-BucketOutputIterator::put(BucketEntry const& e)
+BucketOutputIterator<BucketT>::put(BucketEntryT const& e)
 {
     ZoneScoped;
-    Bucket::checkProtocolLegality(e, mMeta.ledgerVersion);
-    if (e.type() == METAENTRY)
+
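+    // Validation is type-specific: live buckets check protocol legality,
+    // archive buckets reject non-Soroban entries, and all types guard against
+    // writing a second METAENTRY.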
+    if constexpr (std::is_same_v<BucketT, LiveBucket>)
     {
-        if (mPutMeta)
+        LiveBucket::checkProtocolLegality(e, mMeta.ledgerVersion);
+        if (e.type() == METAENTRY)
         {
-            throw std::runtime_error(
-                "putting META entry in bucket after initial entry");
+            if (mPutMeta)
+            {
+                throw std::runtime_error(
+                    "putting META entry in bucket after initial entry");
+            }
+        }
+
+        if (!mKeepTombstoneEntries && BucketT::isTombstoneEntry(e))
+        {
+            ++mMergeCounters.mOutputIteratorTombstoneElisions;
+            return;
         }
     }
+    else if constexpr (std::is_same_v<BucketT, HotArchiveBucket>)
+    {
+        if (e.type() == HOT_ARCHIVE_METAENTRY)
+        {
+            if (mPutMeta)
+            {
+                throw std::runtime_error(
+                    "putting META entry in bucket after initial entry");
+            }
+        }
+        else
+        {
+            if (e.type() == HOT_ARCHIVE_ARCHIVED)
+            {
+                if (!isSorobanEntry(e.archivedEntry().data))
+                {
+                    throw std::runtime_error(
+                        "putting non-soroban entry in hot archive bucket");
+                }
+            }
+            else
+            {
+                if (!isSorobanEntry(e.key()))
+                {
+                    throw std::runtime_error(
+                        "putting non-soroban entry in hot archive bucket");
+                }
+            }
+        }
 
-    if (!mKeepDeadEntries && e.type() == DEADENTRY)
+        // HOT_ARCHIVE_LIVE entries are dropped in the last bucket level
+        // (similar to DEADENTRY on live BucketLists)
+        if (!mKeepTombstoneEntries && BucketT::isTombstoneEntry(e))
+        {
+            ++mMergeCounters.mOutputIteratorTombstoneElisions;
+            return;
+        }
+    }
+    else
     {
-        ++mMergeCounters.mOutputIteratorTombstoneElisions;
-        return;
+        if (e.type() == COLD_ARCHIVE_METAENTRY)
+        {
+            if (mPutMeta)
+            {
+                throw std::runtime_error(
+                    "putting META entry in bucket after initial entry");
+            }
+        }
+        else if (e.type() == COLD_ARCHIVE_ARCHIVED_LEAF)
+        {
+            if (!isSorobanEntry(e.archivedLeaf().archivedEntry.data))
+            {
+                throw std::runtime_error(
+                    "putting non-soroban entry in cold archive bucket");
+            }
+        }
+        else if (e.type() == COLD_ARCHIVE_DELETED_LEAF)
+        {
+            if (!isSorobanEntry(e.deletedLeaf().deletedKey))
+            {
+                throw std::runtime_error(
+                    "putting non-soroban entry in cold archive bucket");
+            }
+        }
     }
 
     // Check to see if there's an existing buffered entry.
@@ -85,7 +187,7 @@ BucketOutputIterator::put(BucketEntry const& e)
     }
     else
     {
-        mBuf = std::make_unique<BucketEntry>();
+        mBuf = std::make_unique<BucketEntryT>();
     }
 
     // In any case, replace *mBuf with e.
@@ -93,10 +195,10 @@ BucketOutputIterator::put(BucketEntry const& e)
     *mBuf = e;
 }
 
-std::shared_ptr<Bucket>
-BucketOutputIterator::getBucket(BucketManager& bucketManager,
-                                bool shouldSynchronouslyIndex,
-                                MergeKey* mergeKey)
+template <typename BucketT>
+std::shared_ptr<BucketT>
+BucketOutputIterator<BucketT>::getBucket(BucketManager& bucketManager,
+                                         MergeKey* mergeKey)
 {
     ZoneScoped;
     if (mBuf)
@@ -117,25 +219,46 @@ BucketOutputIterator::getBucket(BucketManager& bucketManager,
         {
             bucketManager.noteEmptyMergeOutput(*mergeKey);
         }
-        return std::make_shared<Bucket>();
+        return std::make_shared<BucketT>();
     }
 
     auto hash = mHasher.finish();
     std::unique_ptr<BucketIndex const> index{};
 
-    // If this bucket needs to be indexed and is not already indexed
-    if (shouldSynchronouslyIndex)
+    // Either it's a new bucket or we just reconstructed a bucket we already
+    // have; in any case, ensure we have an index.
+    if (auto b = bucketManager.getBucketIfExists(hash); !b || !b->isIndexed())
+    {
+        index = BucketIndex::createIndex<BucketEntryT>(bucketManager, mFilename,
+                                                       hash);
+    }
+
+    if constexpr (std::is_same_v<BucketT, LiveBucket>)
     {
-        // either it's a new bucket or we just reconstructed a bucket
-        // we already have, in any case ensure we have an index
-        if (auto b = bucketManager.getBucketIfExists(hash);
-            !b || !b->isIndexed())
+        return bucketManager.adoptFileAsLiveBucket(mFilename.string(), hash,
+                                                   mergeKey, std::move(index));
+    }
+    else if constexpr (std::is_same_v<BucketT, HotArchiveBucket>)
+    {
+
+        return bucketManager.adoptFileAsHotArchiveBucket(
+            mFilename.string(), hash, mergeKey, std::move(index));
+    }
+    else
+    {
+        if (mEpoch)
+        {
+            return bucketManager.adoptFileAsPendingColdArchiveBucket(
+                mFilename.string(), hash, std::move(index), *mEpoch);
+        }
+        else
         {
-            index = BucketIndex::createIndex(bucketManager, mFilename, hash);
+            releaseAssert(false);
         }
     }
-
-    return bucketManager.adoptFileAsBucket(mFilename.string(), hash, mergeKey,
-                                           std::move(index));
 }
+
+template class BucketOutputIterator<LiveBucket>;
+template class BucketOutputIterator<HotArchiveBucket>;
+template class BucketOutputIterator<ColdArchiveBucket>;
 }
diff --git a/src/bucket/BucketOutputIterator.h b/src/bucket/BucketOutputIterator.h
index 2b035f5f11..3add6c4560 100644
--- a/src/bucket/BucketOutputIterator.h
+++ b/src/bucket/BucketOutputIterator.h
@@ -4,6 +4,7 @@
 // under the Apache License, Version 2.0. See the COPYING file at the root
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
 
+#include "bucket/Bucket.h"
 #include "bucket/BucketManager.h"
 #include "bucket/LedgerCmp.h"
 #include "util/XDRStream.h"
@@ -20,17 +21,27 @@ class BucketManager;
 
 // Helper class that writes new elements to a file and returns a bucket
 // when finished.
-class BucketOutputIterator
+template <typename BucketT> class BucketOutputIterator
 {
+    static_assert(std::is_same_v<BucketT, LiveBucket> ||
+                  std::is_same_v<BucketT, HotArchiveBucket> ||
+                  std::is_same_v<BucketT, ColdArchiveBucket>);
+
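+    // Entry type stored in each bucket kind: BucketEntry for LiveBucket,
+    // HotArchiveBucketEntry for HotArchiveBucket, ColdArchiveBucketEntry for
+    // ColdArchiveBucket.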
+    using BucketEntryT = std::conditional_t<
+        std::is_same_v<BucketT, LiveBucket>, BucketEntry,
+        std::conditional_t<std::is_same_v<BucketT, HotArchiveBucket>,
+                           HotArchiveBucketEntry, ColdArchiveBucketEntry>>;
+
   protected:
     std::filesystem::path mFilename;
     XDROutputFileStream mOut;
-    BucketEntryIdCmp mCmp;
-    std::unique_ptr<BucketEntry> mBuf;
+    BucketEntryIdCmp<BucketT> mCmp;
+    std::unique_ptr<BucketEntryT> mBuf;
     SHA256 mHasher;
+    std::optional<uint32_t> mEpoch;
     size_t mBytesPut{0};
     size_t mObjectsPut{0};
-    bool mKeepDeadEntries{true};
+    bool mKeepTombstoneEntries{true};
     BucketMetadata mMeta;
     bool mPutMeta{false};
     MergeCounters& mMergeCounters;
@@ -43,14 +54,21 @@ class BucketOutputIterator
     // version new enough that it should _write_ the metadata to the stream in
     // the form of a METAENTRY; but that's not a thing the caller gets to decide
     // (or forget to do), it's handled automatically.
-    BucketOutputIterator(std::string const& tmpDir, bool keepDeadEntries,
+    // If the bucket being constructed is a pending ColdArchive bucket, the
+    // current archival epoch must be passed in for proper naming. Otherwise,
+    // epoch should be nullopt.
+    BucketOutputIterator(std::string const& tmpDir, bool keepTombstoneEntries,
                          BucketMetadata const& meta, MergeCounters& mc,
-                         asio::io_context& ctx, bool doFsync);
+                         asio::io_context& ctx, bool doFsync,
+                         std::optional<uint32_t> epoch = std::nullopt);
 
-    void put(BucketEntry const& e);
+    void put(BucketEntryT const& e);
 
-    std::shared_ptr<Bucket> getBucket(BucketManager& bucketManager,
-                                      bool shouldSynchronouslyIndex,
-                                      MergeKey* mergeKey = nullptr);
+    std::shared_ptr<BucketT> getBucket(BucketManager& bucketManager,
+                                       MergeKey* mergeKey = nullptr);
 };
+
+typedef BucketOutputIterator<LiveBucket> LiveBucketOutputIterator;
+typedef BucketOutputIterator<HotArchiveBucket> HotArchiveBucketOutputIterator;
+typedef BucketOutputIterator<ColdArchiveBucket> ColdArchiveBucketOutputIterator;
 }
diff --git a/src/bucket/BucketSnapshot.cpp b/src/bucket/BucketSnapshot.cpp
index 921076af82..50dbe30b58 100644
--- a/src/bucket/BucketSnapshot.cpp
+++ b/src/bucket/BucketSnapshot.cpp
@@ -8,66 +8,77 @@
 #include "ledger/LedgerTxn.h"
 #include "ledger/LedgerTypeUtils.h"
 #include "util/XDRStream.h"
+#include <type_traits>
 
 namespace stellar
 {
-BucketSnapshot::BucketSnapshot(std::shared_ptr<Bucket const> const b)
+template <class BucketT>
+BucketSnapshotBase<BucketT>::BucketSnapshotBase(
+    std::shared_ptr<BucketT const> const b)
     : mBucket(b)
 {
     releaseAssert(mBucket);
 }
 
-BucketSnapshot::BucketSnapshot(BucketSnapshot const& b)
+template <class BucketT>
+BucketSnapshotBase<BucketT>::BucketSnapshotBase(
+    BucketSnapshotBase<BucketT> const& b)
     : mBucket(b.mBucket), mStream(nullptr)
 {
     releaseAssert(mBucket);
 }
 
+template <class BucketT>
 bool
-BucketSnapshot::isEmpty() const
+BucketSnapshotBase<BucketT>::isEmpty() const
 {
     releaseAssert(mBucket);
     return mBucket->isEmpty();
 }
 
-std::pair<std::optional<BucketEntry>, bool>
-BucketSnapshot::getEntryAtOffset(LedgerKey const& k, std::streamoff pos,
-                                 size_t pageSize) const
+template <class BucketT>
+std::pair<std::shared_ptr<typename BucketSnapshotBase<BucketT>::BucketEntryT>,
+          bool>
+BucketSnapshotBase<BucketT>::getEntryAtOffset(LedgerKey const& k,
+                                              std::streamoff pos,
+                                              size_t pageSize) const
 {
     ZoneScoped;
     if (isEmpty())
     {
-        return {std::nullopt, false};
+        return {nullptr, false};
     }
 
     auto& stream = getStream();
     stream.seek(pos);
 
-    BucketEntry be;
+    BucketEntryT be;
     if (pageSize == 0)
     {
         if (stream.readOne(be))
         {
-            return {std::make_optional(be), false};
+            return {std::make_shared<BucketEntryT>(be), false};
         }
     }
     else if (stream.readPage(be, k, pageSize))
     {
-        return {std::make_optional(be), false};
+        return {std::make_shared<BucketEntryT>(be), false};
     }
 
     // Mark entry miss for metrics
     mBucket->getIndex().markBloomMiss();
-    return {std::nullopt, true};
+    return {nullptr, true};
 }
 
-std::pair<std::optional<BucketEntry>, bool>
-BucketSnapshot::getBucketEntry(LedgerKey const& k) const
+template <class BucketT>
+std::pair<std::shared_ptr<typename BucketSnapshotBase<BucketT>::BucketEntryT>,
+          bool>
+BucketSnapshotBase<BucketT>::getBucketEntry(LedgerKey const& k) const
 {
     ZoneScoped;
     if (isEmpty())
     {
-        return {std::nullopt, false};
+        return {nullptr, false};
     }
 
     auto pos = mBucket->getIndex().lookup(k);
@@ -77,7 +88,7 @@ BucketSnapshot::getBucketEntry(LedgerKey const& k) const
                                 mBucket->getIndex().getPageSize());
     }
 
-    return {std::nullopt, false};
+    return {nullptr, false};
 }
 
 // When searching for an entry, BucketList calls this function on every bucket.
@@ -85,10 +96,11 @@ BucketSnapshot::getBucketEntry(LedgerKey const& k) const
 // If we find the entry, we remove the found key from keys so that later buckets
 // do not load shadowed entries. If we don't find the entry, we do not remove it
 // from keys so that it will be searched for again at a lower level.
+template <class BucketT>
 void
-BucketSnapshot::loadKeysWithLimits(std::set<LedgerKey, LedgerEntryIdCmp>& keys,
-                                   std::vector<LedgerEntry>& result,
-                                   LedgerKeyMeter* lkMeter) const
+BucketSnapshotBase<BucketT>::loadKeys(
+    std::set<LedgerKey, LedgerEntryIdCmp>& keys,
+    std::vector<BulkLoadReturnT>& result, LedgerKeyMeter* lkMeter) const
 {
     ZoneScoped;
     if (isEmpty())
@@ -101,32 +113,65 @@ BucketSnapshot::loadKeysWithLimits(std::set<LedgerKey, LedgerEntryIdCmp>& keys,
     auto indexIter = index.begin();
     while (currKeyIt != keys.end() && indexIter != index.end())
     {
+        // lkMeter only supported for LiveBucketList
+        if (std::is_same_v<BucketT, LiveBucket> && lkMeter)
+        {
+            auto keySize = xdr::xdr_size(*currKeyIt);
+            if (!lkMeter->canLoad(*currKeyIt, keySize))
+            {
+                // If the transactions containing this key have a remaining
+                // quota less than the size of the key, we cannot load the
+                // entry, as xdr_size(key) <= xdr_size(entry). Here we consume
+                // keySize bytes from the quotas of transactions containing the
+                // key so that they will have zero remaining quota and
+                // additional entries belonging to only those same transactions
+                // will not be loaded even if they would fit in the remaining
+                // quota before this update.
+                lkMeter->updateReadQuotasForKey(*currKeyIt, keySize);
+                currKeyIt = keys.erase(currKeyIt);
+                continue;
+            }
+        }
         auto [offOp, newIndexIter] = index.scan(indexIter, *currKeyIt);
         indexIter = newIndexIter;
         if (offOp)
         {
             auto [entryOp, bloomMiss] = getEntryAtOffset(
                 *currKeyIt, *offOp, mBucket->getIndex().getPageSize());
+
             if (entryOp)
             {
-                if (entryOp->type() != DEADENTRY)
+                // Don't return tombstone entries, as these do not exist wrt
+                // ledger state
+                if (!BucketT::isTombstoneEntry(*entryOp))
                 {
-                    bool addEntry = true;
-                    if (lkMeter)
+                    // Only live bucket loads can be metered
+                    if constexpr (std::is_same_v<BucketT, LiveBucket>)
                     {
-                        // Here, we are metering after the entry has been
-                        // loaded. This is because we need to know the size of
-                        // the entry to meter it. Future work will add metering
-                        // at the xdr level.
-                        auto entrySize = xdr::xdr_size(entryOp->liveEntry());
-                        addEntry = lkMeter->canLoad(*currKeyIt, entrySize);
-                        lkMeter->updateReadQuotasForKey(*currKeyIt, entrySize);
+                        bool addEntry = true;
+                        if (lkMeter)
+                        {
+                            // Here, we are metering after the entry has been
+                            // loaded. This is because we need to know the size
+                            // of the entry to meter it. Future work will add
+                            // metering at the xdr level.
+                            auto entrySize =
+                                xdr::xdr_size(entryOp->liveEntry());
+                            addEntry = lkMeter->canLoad(*currKeyIt, entrySize);
+                            lkMeter->updateReadQuotasForKey(*currKeyIt,
+                                                            entrySize);
+                        }
+                        if (addEntry)
+                        {
+                            result.push_back(entryOp->liveEntry());
+                        }
                     }
-                    if (addEntry)
+                    else
                     {
-                        result.push_back(entryOp->liveEntry());
+                        result.push_back(*entryOp);
                     }
                 }
+
                 currKeyIt = keys.erase(currKeyIt);
                 continue;
             }
@@ -137,7 +182,7 @@ BucketSnapshot::loadKeysWithLimits(std::set<LedgerKey, LedgerEntryIdCmp>& keys,
 }
 
 std::vector<PoolID> const&
-BucketSnapshot::getPoolIDsByAsset(Asset const& asset) const
+LiveBucketSnapshot::getPoolIDsByAsset(Asset const& asset) const
 {
     static std::vector<PoolID> const emptyVec = {};
     if (isEmpty())
@@ -149,13 +194,13 @@ BucketSnapshot::getPoolIDsByAsset(Asset const& asset) const
 }
 
 bool
-BucketSnapshot::scanForEviction(EvictionIterator& iter, uint32_t& bytesToScan,
-                                uint32_t ledgerSeq,
-                                std::list<EvictionResultEntry>& evictableKeys,
-                                SearchableBucketListSnapshot& bl) const
+LiveBucketSnapshot::scanForEviction(
+    EvictionIterator& iter, uint32_t& bytesToScan, uint32_t ledgerSeq,
+    std::list<EvictionResultEntry>& evictableKeys,
+    SearchableLiveBucketListSnapshot& bl) const
 {
     ZoneScoped;
-    if (isEmpty() || protocolVersionIsBefore(Bucket::getBucketVersion(mBucket),
+    if (isEmpty() || protocolVersionIsBefore(mBucket->getBucketVersion(),
                                              SOROBAN_PROTOCOL_VERSION))
     {
         // EOF, skip to next bucket
@@ -240,8 +285,9 @@ BucketSnapshot::scanForEviction(EvictionIterator& iter, uint32_t& bytesToScan,
     return false;
 }
 
+template <class BucketT>
 XDRInputFileStream&
-BucketSnapshot::getStream() const
+BucketSnapshotBase<BucketT>::getStream() const
 {
     releaseAssertOrThrow(!isEmpty());
     if (!mStream)
@@ -252,9 +298,36 @@ BucketSnapshot::getStream() const
     return *mStream;
 }
 
-std::shared_ptr<Bucket const>
-BucketSnapshot::getRawBucket() const
+template <class BucketT>
+std::shared_ptr<BucketT const>
+BucketSnapshotBase<BucketT>::getRawBucket() const
 {
     return mBucket;
 }
+
+HotArchiveBucketSnapshot::HotArchiveBucketSnapshot(
+    std::shared_ptr<HotArchiveBucket const> const b)
+    : BucketSnapshotBase<HotArchiveBucket>(b)
+{
+}
+
+LiveBucketSnapshot::LiveBucketSnapshot(
+    std::shared_ptr<LiveBucket const> const b)
+    : BucketSnapshotBase<LiveBucket>(b)
+{
+}
+
+HotArchiveBucketSnapshot::HotArchiveBucketSnapshot(
+    HotArchiveBucketSnapshot const& b)
+    : BucketSnapshotBase<HotArchiveBucket>(b)
+{
+}
+
+LiveBucketSnapshot::LiveBucketSnapshot(LiveBucketSnapshot const& b)
+    : BucketSnapshotBase<LiveBucket>(b)
+{
+}
+
+template class BucketSnapshotBase<LiveBucket>;
+template class BucketSnapshotBase<HotArchiveBucket>;
 }
\ No newline at end of file
diff --git a/src/bucket/BucketSnapshot.h b/src/bucket/BucketSnapshot.h
index 18faa51c34..6aee00e6f1 100644
--- a/src/bucket/BucketSnapshot.h
+++ b/src/bucket/BucketSnapshot.h
@@ -4,26 +4,41 @@
 // under the Apache License, Version 2.0. See the COPYING file at the root
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
 
+#include "bucket/Bucket.h"
 #include "bucket/LedgerCmp.h"
 #include "util/NonCopyable.h"
+#include "xdr/Stellar-ledger-entries.h"
 #include <list>
 #include <set>
 
-#include <optional>
-
 namespace stellar
 {
 
-class Bucket;
 class XDRInputFileStream;
-class SearchableBucketListSnapshot;
 struct EvictionResultEntry;
 class LedgerKeyMeter;
+class SearchableLiveBucketListSnapshot;
 
 // A lightweight wrapper around Bucket for thread safe BucketListDB lookups
-class BucketSnapshot : public NonMovable
+template <class BucketT> class BucketSnapshotBase : public NonMovable
 {
-    std::shared_ptr<Bucket const> const mBucket;
+    static_assert(std::is_same_v<BucketT, LiveBucket> ||
+                  std::is_same_v<BucketT, HotArchiveBucket> ||
+                  std::is_same_v<BucketT, ColdArchiveBucket>);
+
+  protected:
+    using BucketEntryT = std::conditional_t<
+        std::is_same_v<BucketT, LiveBucket>, BucketEntry,
+        std::conditional_t<std::is_same_v<BucketT, HotArchiveBucket>,
+                           HotArchiveBucketEntry, ColdArchiveBucketEntry>>;
+
+    // loadKeys returns a vector of LedgerEntry for LiveBucket and of
+    // HotArchiveBucketEntry for HotArchiveBucket
+    using BulkLoadReturnT =
+        std::conditional_t<std::is_same_v<BucketT, LiveBucket>, LedgerEntry,
+                           HotArchiveBucketEntry>;
+
+    std::shared_ptr<BucketT const> const mBucket;
 
     // Lazily-constructed and retained for read path.
     mutable std::unique_ptr<XDRInputFileStream> mStream{};
@@ -37,32 +52,42 @@ class BucketSnapshot : public NonMovable
     // reads until key is found or the end of the page. Returns <BucketEntry,
     // bloomMiss>, where bloomMiss is true if a bloomMiss occurred during the
     // load.
-    std::pair<std::optional<BucketEntry>, bool>
+    std::pair<std::shared_ptr<BucketEntryT>, bool>
     getEntryAtOffset(LedgerKey const& k, std::streamoff pos,
                      size_t pageSize) const;
 
-    BucketSnapshot(std::shared_ptr<Bucket const> const b);
+    BucketSnapshotBase(std::shared_ptr<BucketT const> const b);
 
     // Only allow copy constructor, is threadsafe
-    BucketSnapshot(BucketSnapshot const& b);
-    BucketSnapshot& operator=(BucketSnapshot const&) = delete;
+    BucketSnapshotBase(BucketSnapshotBase const& b);
+    BucketSnapshotBase& operator=(BucketSnapshotBase const&) = delete;
 
   public:
     bool isEmpty() const;
-    std::shared_ptr<Bucket const> getRawBucket() const;
+    std::shared_ptr<BucketT const> getRawBucket() const;
 
     // Loads bucket entry for LedgerKey k. Returns <BucketEntry, bloomMiss>,
     // where bloomMiss is true if a bloomMiss occurred during the load.
-    std::pair<std::optional<BucketEntry>, bool>
+    std::pair<std::shared_ptr<BucketEntryT>, bool>
     getBucketEntry(LedgerKey const& k) const;
 
     // Loads LedgerEntry's for given keys. When a key is found, the
     // entry is added to result and the key is removed from keys.
     // If a pointer to a LedgerKeyMeter is provided, a key will only be loaded
     // if the meter has a transaction with sufficient read quota for the key.
-    void loadKeysWithLimits(std::set<LedgerKey, LedgerEntryIdCmp>& keys,
-                            std::vector<LedgerEntry>& result,
-                            LedgerKeyMeter* lkMeter) const;
+    // If Bucket is not of type LiveBucket, lkMeter is ignored.
+    void loadKeys(std::set<LedgerKey, LedgerEntryIdCmp>& keys,
+                  std::vector<BulkLoadReturnT>& result,
+                  LedgerKeyMeter* lkMeter) const;
+};
+
+class LiveBucketSnapshot : public BucketSnapshotBase<LiveBucket>
+{
+  public:
+    LiveBucketSnapshot(std::shared_ptr<LiveBucket const> const b);
+
+    // Only allow copy constructors, is threadsafe
+    LiveBucketSnapshot(LiveBucketSnapshot const& b);
 
     // Return all PoolIDs that contain the given asset on either side of the
     // pool
@@ -71,8 +96,15 @@ class BucketSnapshot : public NonMovable
     bool scanForEviction(EvictionIterator& iter, uint32_t& bytesToScan,
                          uint32_t ledgerSeq,
                          std::list<EvictionResultEntry>& evictableKeys,
-                         SearchableBucketListSnapshot& bl) const;
+                         SearchableLiveBucketListSnapshot& bl) const;
+};
+
+class HotArchiveBucketSnapshot : public BucketSnapshotBase<HotArchiveBucket>
+{
+  public:
+    HotArchiveBucketSnapshot(std::shared_ptr<HotArchiveBucket const> const b);
 
-    friend struct BucketLevelSnapshot;
+    // Only allow copy construction; copying is threadsafe
+    HotArchiveBucketSnapshot(HotArchiveBucketSnapshot const& b);
 };
 }
\ No newline at end of file
diff --git a/src/bucket/BucketSnapshotManager.cpp b/src/bucket/BucketSnapshotManager.cpp
index 52f907307b..703da5c21f 100644
--- a/src/bucket/BucketSnapshotManager.cpp
+++ b/src/bucket/BucketSnapshotManager.cpp
@@ -3,8 +3,10 @@
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
 
 #include "bucket/BucketSnapshotManager.h"
+#include "bucket/Bucket.h"
 #include "bucket/BucketListSnapshot.h"
 #include "main/Application.h"
+#include "util/GlobalChecks.h"
 #include "util/XDRStream.h" // IWYU pragma: keep
 
 #include "medida/meter.h"
@@ -15,12 +17,17 @@ namespace stellar
 {
 
 BucketSnapshotManager::BucketSnapshotManager(
-    Application& app, std::unique_ptr<BucketListSnapshot const>&& snapshot,
-    uint32_t numHistoricalSnapshots)
+    Application& app,
+    std::unique_ptr<BucketListSnapshot<LiveBucket> const>&& snapshot,
+    std::unique_ptr<BucketListSnapshot<HotArchiveBucket> const>&&
+        hotArchiveSnapshot,
+    uint32_t numLiveHistoricalSnapshots)
     : mApp(app)
-    , mCurrentSnapshot(std::move(snapshot))
-    , mHistoricalSnapshots()
-    , mNumHistoricalSnapshots(numHistoricalSnapshots)
+    , mCurrLiveSnapshot(std::move(snapshot))
+    , mCurrHotArchiveSnapshot(std::move(hotArchiveSnapshot))
+    , mLiveHistoricalSnapshots()
+    , mHotArchiveHistoricalSnapshots()
+    , mNumHistoricalSnapshots(numLiveHistoricalSnapshots)
     , mBulkLoadMeter(app.getMetrics().NewMeter(
           {"bucketlistDB", "query", "loads"}, "query"))
     , mBloomMisses(app.getMetrics().NewMeter(
@@ -29,14 +36,25 @@ BucketSnapshotManager::BucketSnapshotManager(
           {"bucketlistDB", "bloom", "lookups"}, "bloom"))
 {
     releaseAssert(threadIsMain());
+    releaseAssert(mCurrLiveSnapshot);
+    releaseAssert(mCurrHotArchiveSnapshot);
 }
 
-std::shared_ptr<SearchableBucketListSnapshot>
-BucketSnapshotManager::copySearchableBucketListSnapshot() const
+std::shared_ptr<SearchableLiveBucketListSnapshot>
+BucketSnapshotManager::copySearchableLiveBucketListSnapshot() const
 {
     // Can't use std::make_shared due to private constructor
-    return std::shared_ptr<SearchableBucketListSnapshot>(
-        new SearchableBucketListSnapshot(*this));
+    return std::shared_ptr<SearchableLiveBucketListSnapshot>(
+        new SearchableLiveBucketListSnapshot(*this));
+}
+
+std::shared_ptr<SearchableHotArchiveBucketListSnapshot>
+BucketSnapshotManager::copySearchableHotArchiveBucketListSnapshot() const
+{
+    releaseAssert(mCurrHotArchiveSnapshot);
+    // Can't use std::make_shared due to private constructor
+    return std::shared_ptr<SearchableHotArchiveBucketListSnapshot>(
+        new SearchableHotArchiveBucketListSnapshot(*this));
 }
 
 medida::Timer&
@@ -63,12 +81,43 @@ BucketSnapshotManager::recordBulkLoadMetrics(std::string const& label,
     return iter->second;
 }
 
+template <class SnapshotT>
 void
 BucketSnapshotManager::maybeUpdateSnapshot(
-    std::unique_ptr<BucketListSnapshot const>& snapshot,
-    std::map<uint32_t, std::unique_ptr<BucketListSnapshot const>>&
-        historicalSnapshots) const
+    std::unique_ptr<SnapshotT const>& snapshot,
+    std::map<uint32_t, std::unique_ptr<SnapshotT const>>& historicalSnapshots)
+    const
 {
+    static_assert(
+        std::is_same_v<SnapshotT, BucketListSnapshot<LiveBucket>> ||
+        std::is_same_v<SnapshotT, BucketListSnapshot<HotArchiveBucket>>);
+
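+    // Select the manager-side snapshot and historical map matching SnapshotT
+    // via immediately-invoked, constexpr-dispatching lambdas.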
+    auto const& managerSnapshot = [&]() -> auto const&
+    {
+        if constexpr (std::is_same_v<SnapshotT, BucketListSnapshot<LiveBucket>>)
+        {
+            return mCurrLiveSnapshot;
+        }
+        else
+        {
+            return mCurrHotArchiveSnapshot;
+        }
+    }
+    ();
+
+    auto const& managerHistoricalSnapshots = [&]() -> auto const&
+    {
+        if constexpr (std::is_same_v<SnapshotT, BucketListSnapshot<LiveBucket>>)
+        {
+            return mLiveHistoricalSnapshots;
+        }
+        else
+        {
+            return mHotArchiveHistoricalSnapshots;
+        }
+    }
+    ();
+
     // The canonical snapshot held by the BucketSnapshotManager is not being
     // modified. Rather, a thread is checking its copy against the canonical
     // snapshot, so use a shared lock.
@@ -76,64 +125,74 @@ BucketSnapshotManager::maybeUpdateSnapshot(
 
     // First update current snapshot
     if (!snapshot ||
-        snapshot->getLedgerSeq() != mCurrentSnapshot->getLedgerSeq())
+        snapshot->getLedgerSeq() != managerSnapshot->getLedgerSeq())
     {
         // Should only update with a newer snapshot
         releaseAssert(!snapshot || snapshot->getLedgerSeq() <
-                                       mCurrentSnapshot->getLedgerSeq());
-        snapshot = std::make_unique<BucketListSnapshot>(*mCurrentSnapshot);
+                                       managerSnapshot->getLedgerSeq());
+        snapshot = std::make_unique<SnapshotT>(*managerSnapshot);
     }
 
     // Then update historical snapshots (if any exist)
-    if (mHistoricalSnapshots.empty())
+    if (managerHistoricalSnapshots.empty())
     {
         return;
     }
 
     // If size of manager's history map is different, or if the oldest snapshot
     // ledger seq is different, we need to update.
-    if (mHistoricalSnapshots.size() != historicalSnapshots.size() ||
-        mHistoricalSnapshots.begin()->first !=
+    if (managerHistoricalSnapshots.size() != historicalSnapshots.size() ||
+        managerHistoricalSnapshots.begin()->first !=
             historicalSnapshots.begin()->first)
     {
         // Copy current snapshot map into historicalSnapshots
         historicalSnapshots.clear();
-        for (auto const& [ledgerSeq, snap] : mHistoricalSnapshots)
+        for (auto const& [ledgerSeq, snap] : managerHistoricalSnapshots)
         {
-            historicalSnapshots.emplace(
-                ledgerSeq, std::make_unique<BucketListSnapshot>(*snap));
+            historicalSnapshots.emplace(ledgerSeq,
+                                        std::make_unique<SnapshotT>(*snap));
         }
     }
 }
 
 void
 BucketSnapshotManager::updateCurrentSnapshot(
-    std::unique_ptr<BucketListSnapshot const>&& newSnapshot)
+    std::unique_ptr<BucketListSnapshot<LiveBucket> const>&& liveSnapshot,
+    std::unique_ptr<BucketListSnapshot<HotArchiveBucket> const>&&
+        hotArchiveSnapshot)
 {
-    releaseAssert(newSnapshot);
     releaseAssert(threadIsMain());
 
-    // Updating the BucketSnapshotManager canonical snapshot, must lock
-    // exclusively for write access.
-    std::unique_lock<std::shared_mutex> lock(mSnapshotMutex);
-    releaseAssert(!mCurrentSnapshot || newSnapshot->getLedgerSeq() >=
-                                           mCurrentSnapshot->getLedgerSeq());
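+    // Helper shared by both BucketLists: rotate the current snapshot into
+    // the historical map (evicting the oldest if full), then install the
+    // new snapshot.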
+    auto updateSnapshot = [numHistoricalSnapshots = mNumHistoricalSnapshots](
+                              auto& currentSnapshot, auto& historicalSnapshots,
+                              auto&& newSnapshot) {
+        releaseAssert(newSnapshot);
+        releaseAssert(!currentSnapshot || newSnapshot->getLedgerSeq() >=
+                                              currentSnapshot->getLedgerSeq());
 
-    // First update historical snapshots
-    if (mNumHistoricalSnapshots != 0)
-    {
-        // If historical snapshots are full, delete the oldest one
-        if (mHistoricalSnapshots.size() == mNumHistoricalSnapshots)
+        // First update historical snapshots
+        if (numHistoricalSnapshots != 0)
         {
-            mHistoricalSnapshots.erase(mHistoricalSnapshots.begin());
+            // If historical snapshots are full, delete the oldest one
+            if (historicalSnapshots.size() == numHistoricalSnapshots)
+            {
+                historicalSnapshots.erase(historicalSnapshots.begin());
+            }
+
+            historicalSnapshots.emplace(currentSnapshot->getLedgerSeq(),
+                                        std::move(currentSnapshot));
+            currentSnapshot = nullptr;
         }
 
-        mHistoricalSnapshots.emplace(mCurrentSnapshot->getLedgerSeq(),
-                                     std::move(mCurrentSnapshot));
-        mCurrentSnapshot = nullptr;
-    }
+        currentSnapshot.swap(newSnapshot);
+    };
 
-    mCurrentSnapshot.swap(newSnapshot);
+    // Updating the BucketSnapshotManager canonical snapshot, must lock
+    // exclusively for write access.
+    std::unique_lock<std::shared_mutex> lock(mSnapshotMutex);
+    updateSnapshot(mCurrLiveSnapshot, mLiveHistoricalSnapshots, liveSnapshot);
+    updateSnapshot(mCurrHotArchiveSnapshot, mHotArchiveHistoricalSnapshots,
+                   hotArchiveSnapshot);
 }
 
 void
@@ -170,4 +229,16 @@ BucketSnapshotManager::endPointLoadTimer(LedgerEntryType t,
         iter->second.Update(duration);
     }
 }
+
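+// Explicit instantiations for the two snapshot types the manager maintains.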
+template void
+BucketSnapshotManager::maybeUpdateSnapshot<BucketListSnapshot<LiveBucket>>(
+    std::unique_ptr<BucketListSnapshot<LiveBucket> const>& snapshot,
+    std::map<uint32_t, std::unique_ptr<BucketListSnapshot<LiveBucket> const>>&
+        historicalSnapshots) const;
+template void BucketSnapshotManager::maybeUpdateSnapshot<
+    BucketListSnapshot<HotArchiveBucket>>(
+    std::unique_ptr<BucketListSnapshot<HotArchiveBucket> const>& snapshot,
+    std::map<uint32_t,
+             std::unique_ptr<BucketListSnapshot<HotArchiveBucket> const>>&
+        historicalSnapshots) const;
 }
\ No newline at end of file
diff --git a/src/bucket/BucketSnapshotManager.h b/src/bucket/BucketSnapshotManager.h
index 71b33862b0..de44f6f165 100644
--- a/src/bucket/BucketSnapshotManager.h
+++ b/src/bucket/BucketSnapshotManager.h
@@ -4,6 +4,8 @@
 // under the Apache License, Version 2.0. See the COPYING file at the root
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
 
+#include "bucket/Bucket.h"
+#include "bucket/BucketList.h"
 #include "bucket/BucketManagerImpl.h"
 #include "util/NonCopyable.h"
 #include "util/UnorderedMap.h"
@@ -23,8 +25,10 @@ namespace stellar
 {
 
 class Application;
-class BucketList;
-class BucketListSnapshot;
+class LiveBucketList;
+template <class BucketT> class BucketListSnapshot;
+class SearchableLiveBucketListSnapshot;
+class SearchableHotArchiveBucketListSnapshot;
 
 // This class serves as the boundary between non-threadsafe singleton classes
 // (BucketManager, BucketList, Metrics, etc) and threadsafe, parallel BucketList
@@ -37,16 +41,20 @@ class BucketSnapshotManager : NonMovableOrCopyable
     // Snapshot that is maintained and periodically updated by BucketManager on
     // the main thread. When background threads need to generate or refresh a
     // snapshot, they will copy this snapshot.
-    std::unique_ptr<BucketListSnapshot const> mCurrentSnapshot{};
+    std::unique_ptr<BucketListSnapshot<LiveBucket> const> mCurrLiveSnapshot{};
+    std::unique_ptr<BucketListSnapshot<HotArchiveBucket> const>
+        mCurrHotArchiveSnapshot{};
 
     // ledgerSeq that the snapshot is based on -> snapshot
-    std::map<uint32_t, std::unique_ptr<BucketListSnapshot const>>
-        mHistoricalSnapshots;
+    std::map<uint32_t, std::unique_ptr<BucketListSnapshot<LiveBucket> const>>
+        mLiveHistoricalSnapshots;
+    std::map<uint32_t,
+             std::unique_ptr<BucketListSnapshot<HotArchiveBucket> const>>
+        mHotArchiveHistoricalSnapshots;
 
     uint32_t const mNumHistoricalSnapshots;
 
-    // Lock must be held when accessing mCurrentSnapshot and
-    // mHistoricalSnapshots
+    // Lock must be held when accessing any snapshot
     mutable std::shared_mutex mSnapshotMutex;
 
     mutable UnorderedMap<LedgerEntryType, medida::Timer&> mPointTimers{};
@@ -59,26 +67,35 @@ class BucketSnapshotManager : NonMovableOrCopyable
     mutable std::optional<VirtualClock::time_point> mTimerStart;
 
   public:
-    // Called by main thread to update mCurrentSnapshot whenever the BucketList
+    // Called by main thread to update snapshots whenever the BucketList
     // is updated
     void updateCurrentSnapshot(
-        std::unique_ptr<BucketListSnapshot const>&& newSnapshot);
+        std::unique_ptr<BucketListSnapshot<LiveBucket> const>&& liveSnapshot,
+        std::unique_ptr<BucketListSnapshot<HotArchiveBucket> const>&&
+            hotArchiveSnapshot);
+
     // numHistoricalLedgers is the number of historical snapshots that the
     // snapshot manager will maintain. If numHistoricalLedgers is 5, snapshots
     // will be capable of querying state from ledger [lcl, lcl - 5].
-    BucketSnapshotManager(Application& app,
-                          std::unique_ptr<BucketListSnapshot const>&& snapshot,
-                          uint32_t numHistoricalLedgers);
-
-    std::shared_ptr<SearchableBucketListSnapshot>
-    copySearchableBucketListSnapshot() const;
-
-    // Checks if snapshot is out of date with mCurrentSnapshot and updates
-    // it accordingly
-    void maybeUpdateSnapshot(
-        std::unique_ptr<BucketListSnapshot const>& snapshot,
-        std::map<uint32_t, std::unique_ptr<BucketListSnapshot const>>&
-            historicalSnapshots) const;
+    BucketSnapshotManager(
+        Application& app,
+        std::unique_ptr<BucketListSnapshot<LiveBucket> const>&& snapshot,
+        std::unique_ptr<BucketListSnapshot<HotArchiveBucket> const>&&
+            hotArchiveSnapshot,
+        uint32_t numHistoricalLedgers);
+
+    std::shared_ptr<SearchableLiveBucketListSnapshot>
+    copySearchableLiveBucketListSnapshot() const;
+
+    std::shared_ptr<SearchableHotArchiveBucketListSnapshot>
+    copySearchableHotArchiveBucketListSnapshot() const;
+
+    // Checks if snapshot is out of date and updates it accordingly
+    template <class SnapshotT>
+    void
+    maybeUpdateSnapshot(std::unique_ptr<SnapshotT const>& snapshot,
+                        std::map<uint32_t, std::unique_ptr<SnapshotT const>>&
+                            historicalSnapshots) const;
 
     // All metric recording functions must only be called by the main thread
     void startPointLoadTimer() const;
diff --git a/src/bucket/FutureBucket.cpp b/src/bucket/FutureBucket.cpp
index 981708e196..bc2dec6d16 100644
--- a/src/bucket/FutureBucket.cpp
+++ b/src/bucket/FutureBucket.cpp
@@ -18,6 +18,7 @@
 #include "util/GlobalChecks.h"
 #include "util/LogSlowExecution.h"
 #include "util/Logging.h"
+#include "util/ProtocolVersion.h"
 #include "util/Thread.h"
 #include <Tracy.hpp>
 #include <fmt/format.h>
@@ -25,16 +26,17 @@
 #include "medida/metrics_registry.h"
 
 #include <chrono>
+#include <memory>
+#include <type_traits>
 
 namespace stellar
 {
-
-FutureBucket::FutureBucket(Application& app,
-                           std::shared_ptr<Bucket> const& curr,
-                           std::shared_ptr<Bucket> const& snap,
-                           std::vector<std::shared_ptr<Bucket>> const& shadows,
-                           uint32_t maxProtocolVersion, bool countMergeEvents,
-                           uint32_t level)
+template <class BucketT>
+FutureBucket<BucketT>::FutureBucket(
+    Application& app, std::shared_ptr<BucketT> const& curr,
+    std::shared_ptr<BucketT> const& snap,
+    std::vector<std::shared_ptr<BucketT>> const& shadows,
+    uint32_t maxProtocolVersion, bool countMergeEvents, uint32_t level)
     : mState(FB_LIVE_INPUTS)
     , mInputCurrBucket(curr)
     , mInputSnapBucket(snap)
@@ -48,8 +50,8 @@ FutureBucket::FutureBucket(Application& app,
     releaseAssert(snap);
     mInputCurrBucketHash = binToHex(curr->getHash());
     mInputSnapBucketHash = binToHex(snap->getHash());
-    if (protocolVersionStartsFrom(Bucket::getBucketVersion(snap),
-                                  Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
+    if (protocolVersionStartsFrom(snap->getBucketVersion(),
+                                  LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
     {
         if (!mInputShadowBuckets.empty())
         {
@@ -57,6 +59,20 @@ FutureBucket::FutureBucket(Application& app,
                 "Invalid FutureBucket: ledger version doesn't support shadows");
         }
     }
+
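+    // Archival buckets are only valid once the protocol supports persistent
+    // eviction; reject non-empty snaps from older protocols.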
+    if constexpr (!std::is_same_v<BucketT, LiveBucket>)
+    {
+        if (!snap->isEmpty() &&
+            protocolVersionIsBefore(
+                snap->getBucketVersion(),
+                Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION))
+        {
+            throw std::runtime_error(
+                "Invalid ArchivalFutureBucket: ledger version doesn't support "
+                "Archival BucketList");
+        }
+    }
+
     for (auto const& b : mInputShadowBuckets)
     {
         mInputShadowBucketHashes.push_back(binToHex(b->getHash()));
@@ -64,8 +80,9 @@ FutureBucket::FutureBucket(Application& app,
     startMerge(app, maxProtocolVersion, countMergeEvents, level);
 }
 
+template <class BucketT>
 void
-FutureBucket::setLiveOutput(std::shared_ptr<Bucket> output)
+FutureBucket<BucketT>::setLiveOutput(std::shared_ptr<BucketT> output)
 {
     ZoneScoped;
     mState = FB_LIVE_OUTPUT;
@@ -74,14 +91,16 @@ FutureBucket::setLiveOutput(std::shared_ptr<Bucket> output)
     checkState();
 }
 
+template <class BucketT>
 static void
-checkHashEq(std::shared_ptr<Bucket> const& b, std::string const& h)
+checkHashEq(std::shared_ptr<BucketT> const& b, std::string const& h)
 {
     releaseAssert(b->getHash() == hexToBin256(h));
 }
 
+template <class BucketT>
 void
-FutureBucket::checkHashesMatch() const
+FutureBucket<BucketT>::checkHashesMatch() const
 {
     ZoneScoped;
     if (!mInputShadowBuckets.empty())
@@ -114,8 +133,9 @@ FutureBucket::checkHashesMatch() const
  * the different hash-only states are mutually exclusive with each other and
  * with live values.
  */
+template <class BucketT>
 void
-FutureBucket::checkState() const
+FutureBucket<BucketT>::checkState() const
 {
     switch (mState)
     {
@@ -174,8 +194,9 @@ FutureBucket::checkState() const
     }
 }
 
+template <class BucketT>
 void
-FutureBucket::clearInputs()
+FutureBucket<BucketT>::clearInputs()
 {
     mInputShadowBuckets.clear();
     mInputSnapBucket.reset();
@@ -186,50 +207,57 @@ FutureBucket::clearInputs()
     mInputCurrBucketHash.clear();
 }
 
+template <class BucketT>
 void
-FutureBucket::clearOutput()
+FutureBucket<BucketT>::clearOutput()
 {
     // NB: MSVC future<> implementation doesn't purge the task lambda (and
     // its captures) on invalidation (due to get()); must explicitly reset.
-    mOutputBucketFuture = std::shared_future<std::shared_ptr<Bucket>>();
+    mOutputBucketFuture = std::shared_future<std::shared_ptr<BucketT>>();
     mOutputBucketHash.clear();
     mOutputBucket.reset();
 }
 
+template <class BucketT>
 void
-FutureBucket::clear()
+FutureBucket<BucketT>::clear()
 {
     mState = FB_CLEAR;
     clearInputs();
     clearOutput();
 }
 
+template <class BucketT>
 bool
-FutureBucket::isLive() const
+FutureBucket<BucketT>::isLive() const
 {
     return (mState == FB_LIVE_INPUTS || mState == FB_LIVE_OUTPUT);
 }
 
+template <class BucketT>
 bool
-FutureBucket::isMerging() const
+FutureBucket<BucketT>::isMerging() const
 {
     return mState == FB_LIVE_INPUTS;
 }
 
+template <class BucketT>
 bool
-FutureBucket::hasHashes() const
+FutureBucket<BucketT>::hasHashes() const
 {
     return (mState == FB_HASH_INPUTS || mState == FB_HASH_OUTPUT);
 }
 
+template <class BucketT>
 bool
-FutureBucket::isClear() const
+FutureBucket<BucketT>::isClear() const
 {
     return mState == FB_CLEAR;
 }
 
+template <class BucketT>
 bool
-FutureBucket::mergeComplete() const
+FutureBucket<BucketT>::mergeComplete() const
 {
     ZoneScoped;
     releaseAssert(isLive());
@@ -241,8 +269,9 @@ FutureBucket::mergeComplete() const
     return futureIsReady(mOutputBucketFuture);
 }
 
-std::shared_ptr<Bucket>
-FutureBucket::resolve()
+template <class BucketT>
+std::shared_ptr<BucketT>
+FutureBucket<BucketT>::resolve()
 {
     ZoneScoped;
     checkState();
@@ -264,7 +293,7 @@ FutureBucket::resolve()
         // Explicitly reset shared_future to ensure destruction of shared state.
         // Some compilers store packaged_task lambdas in the shared state,
         // keeping its captures alive as long as the future is alive.
-        mOutputBucketFuture = std::shared_future<std::shared_ptr<Bucket>>();
+        mOutputBucketFuture = std::shared_future<std::shared_ptr<BucketT>>();
     }
 
     mState = FB_LIVE_OUTPUT;
@@ -272,8 +301,9 @@ FutureBucket::resolve()
     return mOutputBucket;
 }
 
+template <class BucketT>
 bool
-FutureBucket::hasOutputHash() const
+FutureBucket<BucketT>::hasOutputHash() const
 {
     if (mState == FB_LIVE_OUTPUT || mState == FB_HASH_OUTPUT)
     {
@@ -283,28 +313,31 @@ FutureBucket::hasOutputHash() const
     return false;
 }
 
+template <class BucketT>
 std::string const&
-FutureBucket::getOutputHash() const
+FutureBucket<BucketT>::getOutputHash() const
 {
     releaseAssert(mState == FB_LIVE_OUTPUT || mState == FB_HASH_OUTPUT);
     releaseAssert(!mOutputBucketHash.empty());
     return mOutputBucketHash;
 }
 
+template <class BucketT>
 static std::chrono::seconds
 getAvailableTimeForMerge(Application& app, uint32_t level)
 {
     auto closeTime = app.getConfig().getExpectedLedgerCloseTime();
     if (level >= 1)
     {
-        return closeTime * BucketList::levelHalf(level - 1);
+        return closeTime * BucketListBase<BucketT>::levelHalf(level - 1);
     }
     return closeTime;
 }
 
+template <class BucketT>
 void
-FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion,
-                         bool countMergeEvents, uint32_t level)
+FutureBucket<BucketT>::startMerge(Application& app, uint32_t maxProtocolVersion,
+                                  bool countMergeEvents, uint32_t level)
 {
     ZoneScoped;
     // NB: startMerge starts with FutureBucket in a half-valid state; the inputs
@@ -313,9 +346,9 @@ FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion,
 
     releaseAssert(mState == FB_LIVE_INPUTS);
 
-    std::shared_ptr<Bucket> curr = mInputCurrBucket;
-    std::shared_ptr<Bucket> snap = mInputSnapBucket;
-    std::vector<std::shared_ptr<Bucket>> shadows = mInputShadowBuckets;
+    std::shared_ptr<BucketT> curr = mInputCurrBucket;
+    std::shared_ptr<BucketT> snap = mInputSnapBucket;
+    std::vector<std::shared_ptr<BucketT>> shadows = mInputShadowBuckets;
 
     releaseAssert(curr);
     releaseAssert(snap);
@@ -329,13 +362,31 @@ FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion,
     auto& timer = app.getMetrics().NewTimer(
         {"bucket", "merge-time", "level-" + std::to_string(level)});
 
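+    // MergeKey is keyed on bucket hashes rather than bucket pointers, so
+    // collect the shadow hashes up front.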
+    std::vector<Hash> shadowHashes;
+    shadowHashes.reserve(shadows.size());
+    for (auto const& b : shadows)
+    {
+        shadowHashes.emplace_back(b->getHash());
+    }
+
     // It's possible we're running a merge that's already running, for example
     // due to having been serialized to the publish queue and then immediately
     // deserialized. In this case we want to attach to the existing merge, which
     // will have left a std::shared_future behind in a shared cache in the
     // bucket manager.
-    MergeKey mk{BucketList::keepDeadEntries(level), curr, snap, shadows};
-    auto f = bm.getMergeFuture(mk);
+    MergeKey mk{BucketListBase<BucketT>::keepTombstoneEntries(level),
+                curr->getHash(), snap->getHash(), shadowHashes};
+
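+    // The BucketManager keeps separate merge-future caches per bucket type;
+    // consult the one matching BucketT.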
+    std::shared_future<std::shared_ptr<BucketT>> f;
+    if constexpr (std::is_same_v<BucketT, LiveBucket>)
+    {
+        f = bm.getLiveMergeFuture(mk);
+    }
+    else
+    {
+        f = bm.getHotArchiveMergeFuture(mk);
+    }
+
     if (f.valid())
     {
         CLOG_TRACE(Bucket,
@@ -347,9 +398,10 @@ FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion,
     }
     asio::io_context& ctx = app.getWorkerIOContext();
     bool doFsync = !app.getConfig().DISABLE_XDR_FSYNC;
-    std::chrono::seconds availableTime = getAvailableTimeForMerge(app, level);
+    std::chrono::seconds availableTime =
+        getAvailableTimeForMerge<BucketT>(app, level);
 
-    using task_t = std::packaged_task<std::shared_ptr<Bucket>()>;
+    using task_t = std::packaged_task<std::shared_ptr<BucketT>()>;
     std::shared_ptr<task_t> task = std::make_shared<task_t>(
         [curr, snap, &bm, shadows, maxProtocolVersion, countMergeEvents, level,
          &timer, &ctx, doFsync, availableTime]() mutable {
@@ -362,10 +414,10 @@ FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion,
                 ZoneNamedN(mergeZone, "Merge task", true);
                 ZoneValueV(mergeZone, static_cast<int64_t>(level));
 
-                auto res =
-                    Bucket::merge(bm, maxProtocolVersion, curr, snap, shadows,
-                                  BucketList::keepDeadEntries(level),
-                                  countMergeEvents, ctx, doFsync);
+                auto res = Bucket::merge(
+                    bm, maxProtocolVersion, curr, snap, shadows,
+                    BucketListBase<BucketT>::keepTombstoneEntries(level),
+                    countMergeEvents, ctx, doFsync);
 
                 if (res)
                 {
@@ -395,15 +447,24 @@ FutureBucket::startMerge(Application& app, uint32_t maxProtocolVersion,
         });
 
     mOutputBucketFuture = task->get_future().share();
-    bm.putMergeFuture(mk, mOutputBucketFuture);
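+    // Publish the in-progress merge into the cache matching BucketT so
+    // equivalent merges can attach to it.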
+    if constexpr (std::is_same_v<BucketT, LiveBucket>)
+    {
+        bm.putLiveMergeFuture(mk, mOutputBucketFuture);
+    }
+    else
+    {
+        bm.putHotArchiveMergeFuture(mk, mOutputBucketFuture);
+    }
+
     app.postOnBackgroundThread(bind(&task_t::operator(), task),
                                "FutureBucket: merge");
     checkState();
 }
 
+template <class BucketT>
 void
-FutureBucket::makeLive(Application& app, uint32_t maxProtocolVersion,
-                       uint32_t level)
+FutureBucket<BucketT>::makeLive(Application& app, uint32_t maxProtocolVersion,
+                                uint32_t level)
 {
     ZoneScoped;
     checkState();
@@ -412,20 +473,48 @@ FutureBucket::makeLive(Application& app, uint32_t maxProtocolVersion,
     auto& bm = app.getBucketManager();
     if (hasOutputHash())
     {
-        auto b = bm.getBucketByHash(hexToBin256(getOutputHash()));
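+        // Fetch the output bucket through the accessor matching BucketT.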
+        std::shared_ptr<BucketT> b;
+        if constexpr (std::is_same_v<BucketT, LiveBucket>)
+        {
+            b = bm.getLiveBucketByHash(hexToBin256(getOutputHash()));
+        }
+        else
+        {
+            b = bm.getHotArchiveBucketByHash(hexToBin256(getOutputHash()));
+        }
+
         setLiveOutput(b);
     }
     else
     {
         releaseAssert(mState == FB_HASH_INPUTS);
-        mInputCurrBucket =
-            bm.getBucketByHash(hexToBin256(mInputCurrBucketHash));
-        mInputSnapBucket =
-            bm.getBucketByHash(hexToBin256(mInputSnapBucketHash));
+        if constexpr (std::is_same_v<BucketT, LiveBucket>)
+        {
+            mInputCurrBucket =
+                bm.getLiveBucketByHash(hexToBin256(mInputCurrBucketHash));
+            mInputSnapBucket =
+                bm.getLiveBucketByHash(hexToBin256(mInputSnapBucketHash));
+        }
+        else
+        {
+            mInputCurrBucket =
+                bm.getHotArchiveBucketByHash(hexToBin256(mInputCurrBucketHash));
+            mInputSnapBucket =
+                bm.getHotArchiveBucketByHash(hexToBin256(mInputSnapBucketHash));
+        }
         releaseAssert(mInputShadowBuckets.empty());
         for (auto const& h : mInputShadowBucketHashes)
         {
-            auto b = bm.getBucketByHash(hexToBin256(h));
+            std::shared_ptr<BucketT> b;
+            if constexpr (std::is_same_v<BucketT, LiveBucket>)
+            {
+                b = bm.getLiveBucketByHash(hexToBin256(h));
+            }
+            else
+            {
+                b = bm.getHotArchiveBucketByHash(hexToBin256(h));
+            }
+
             releaseAssert(b);
             CLOG_DEBUG(Bucket, "Reconstituting shadow {}", h);
             mInputShadowBuckets.push_back(b);
@@ -436,8 +525,9 @@ FutureBucket::makeLive(Application& app, uint32_t maxProtocolVersion,
     }
 }
 
+template <class BucketT>
 std::vector<std::string>
-FutureBucket::getHashes() const
+FutureBucket<BucketT>::getHashes() const
 {
     ZoneScoped;
     std::vector<std::string> hashes;
@@ -459,4 +549,7 @@ FutureBucket::getHashes() const
     }
     return hashes;
 }
+
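+// Explicit instantiations for the supported bucket types.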
+template class FutureBucket<LiveBucket>;
+template class FutureBucket<HotArchiveBucket>;
 }
diff --git a/src/bucket/FutureBucket.h b/src/bucket/FutureBucket.h
index 4866d90235..cda7e6b61c 100644
--- a/src/bucket/FutureBucket.h
+++ b/src/bucket/FutureBucket.h
@@ -4,6 +4,7 @@
 // under the Apache License, Version 2.0. See the COPYING file at the root
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
 
+#include "bucket/Bucket.h"
 #include "util/GlobalChecks.h"
 #include <cereal/cereal.hpp>
 #include <future>
@@ -16,13 +17,16 @@ namespace stellar
 
 class Bucket;
 class Application;
+class LiveBucket;
+class HotArchiveBucket;
 
 /**
  * FutureBucket is a minor wrapper around
- * std::shared_future<std::shared_ptr<Bucket>>, used in merging multiple buckets
- * together in the BucketList. The reason this is a separate class is that we
- * need to support a level of persistence: serializing merges-in-progress in a
- * symbolic fashion, including restarting the merges after we deserialize.
+ * std::shared_future<std::shared_ptr<BucketT>>, used in merging multiple
+ * buckets together in the BucketList. The reason this is a separate class is
+ * that we need to support a level of persistence: serializing
+ * merges-in-progress in a symbolic fashion, including restarting the merges
+ * after we deserialize.
  *
  * This class is therefore used not _only_ in the BucketList but also in places
  * that serialize and deserialize snapshots of it in the form of
@@ -30,8 +34,11 @@ class Application;
  * the bottom of closeLedger; and the HistoryManager, when storing and
  * retrieving HistoryArchiveStates.
  */
-class FutureBucket
+template <class BucketT> class FutureBucket
 {
+    static_assert(std::is_same_v<BucketT, LiveBucket> ||
+                  std::is_same_v<BucketT, HotArchiveBucket>);
+
     // There are two lifecycles of a FutureBucket:
     //
     // In one, it's created live, snapshotted at some point in the process
@@ -56,11 +63,11 @@ class FutureBucket
     // FutureBucket is constructed, when it is reset, or when it is freshly
     // deserialized and not yet activated. When they are nonempty, they should
     // have values equal to the subsequent mFooHash values below.
-    std::shared_ptr<Bucket> mInputCurrBucket;
-    std::shared_ptr<Bucket> mInputSnapBucket;
-    std::vector<std::shared_ptr<Bucket>> mInputShadowBuckets;
-    std::shared_ptr<Bucket> mOutputBucket;
-    std::shared_future<std::shared_ptr<Bucket>> mOutputBucketFuture;
+    std::shared_ptr<BucketT> mInputCurrBucket;
+    std::shared_ptr<BucketT> mInputSnapBucket;
+    std::vector<std::shared_ptr<BucketT>> mInputShadowBuckets;
+    std::shared_ptr<BucketT> mOutputBucket;
+    std::shared_future<std::shared_ptr<BucketT>> mOutputBucketFuture;
 
     // These strings hold the serializable (or deserialized) bucket hashes of
     // the inputs and outputs of a merge; depending on the state of the
@@ -79,12 +86,12 @@ class FutureBucket
 
     void clearInputs();
     void clearOutput();
-    void setLiveOutput(std::shared_ptr<Bucket> b);
+    void setLiveOutput(std::shared_ptr<BucketT> b);
 
   public:
-    FutureBucket(Application& app, std::shared_ptr<Bucket> const& curr,
-                 std::shared_ptr<Bucket> const& snap,
-                 std::vector<std::shared_ptr<Bucket>> const& shadows,
+    FutureBucket(Application& app, std::shared_ptr<BucketT> const& curr,
+                 std::shared_ptr<BucketT> const& snap,
+                 std::vector<std::shared_ptr<BucketT>> const& shadows,
                  uint32_t maxProtocolVersion, bool countMergeEvents,
                  uint32_t level);
 
@@ -118,7 +125,7 @@ class FutureBucket
     bool mergeComplete() const;
 
     // Precondition: isLive(); waits-for and resolves to merged bucket.
-    std::shared_ptr<Bucket> resolve();
+    std::shared_ptr<BucketT> resolve();
 
     // Precondition: !isLive(); transitions from FB_HASH_FOO to FB_LIVE_FOO
     void makeLive(Application& app, uint32_t maxProtocolVersion,
diff --git a/src/bucket/LedgerCmp.h b/src/bucket/LedgerCmp.h
index 6551448f97..8a9db13df9 100644
--- a/src/bucket/LedgerCmp.h
+++ b/src/bucket/LedgerCmp.h
@@ -6,6 +6,7 @@
 
 #include <type_traits>
 
+#include "util/GlobalChecks.h"
 #include "util/XDROperators.h" // IWYU pragma: keep
 #include "xdr/Stellar-ledger-entries.h"
 #include "xdr/Stellar-ledger.h"
@@ -13,6 +14,10 @@
 namespace stellar
 {
 
+class LiveBucket;
+class HotArchiveBucket;
+class ColdArchiveBucket;
+
 template <typename T>
 bool
 lexCompare(T&& lhs1, T&& rhs1)
@@ -126,10 +131,168 @@ struct LedgerEntryIdCmp
  * LedgerEntries (ignoring their hashes, as the LedgerEntryIdCmp ignores their
  * bodies).
  */
-struct BucketEntryIdCmp
+template <typename BucketT> struct BucketEntryIdCmp
 {
+    static_assert(std::is_same_v<BucketT, LiveBucket> ||
+                  std::is_same_v<BucketT, HotArchiveBucket> ||
+                  std::is_same_v<BucketT, ColdArchiveBucket>);
+
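+    // XDR entry type stored by buckets of type BucketT.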
+    using BucketEntryT = std::conditional_t<
+        std::is_same_v<BucketT, LiveBucket>, BucketEntry,
+        std::conditional_t<std::is_same_v<BucketT, HotArchiveBucket>,
+                           HotArchiveBucketEntry, ColdArchiveBucketEntry>>;
+
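+    // Orders HotArchiveBucketEntries by their underlying LedgerKey, with
+    // METAENTRY sorting before everything else. Throws on malformed entry
+    // types.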
+    bool
+    compareHotArchive(HotArchiveBucketEntry const& a,
+                      HotArchiveBucketEntry const& b) const
+    {
+        HotArchiveBucketEntryType aty = a.type();
+        HotArchiveBucketEntryType bty = b.type();
+
+        // METAENTRY sorts below all other entries, comes first in buckets.
+        if (aty == HOT_ARCHIVE_METAENTRY || bty == HOT_ARCHIVE_METAENTRY)
+        {
+            return aty < bty;
+        }
+
+        if (aty == HOT_ARCHIVE_ARCHIVED)
+        {
+            if (bty == HOT_ARCHIVE_ARCHIVED)
+            {
+                return LedgerEntryIdCmp{}(a.archivedEntry().data,
+                                          b.archivedEntry().data);
+            }
+            else
+            {
+                if (bty != HOT_ARCHIVE_DELETED && bty != HOT_ARCHIVE_LIVE)
+                {
+                    throw std::runtime_error("Malformed bucket: expected "
+                                             "DELETED/LIVE key.");
+                }
+                return LedgerEntryIdCmp{}(a.archivedEntry().data, b.key());
+            }
+        }
+        else
+        {
+            if (aty != HOT_ARCHIVE_DELETED && aty != HOT_ARCHIVE_LIVE)
+            {
+                throw std::runtime_error(
+                    "Malformed bucket: expected DELETED/LIVE key.");
+            }
+
+            if (bty == HOT_ARCHIVE_ARCHIVED)
+            {
+                return LedgerEntryIdCmp{}(a.key(), b.archivedEntry().data);
+            }
+            else
+            {
+                if (bty != HOT_ARCHIVE_DELETED && bty != HOT_ARCHIVE_LIVE)
+                {
+                    throw std::runtime_error("Malformed bucket: expected "
+                                             "DELETED/LIVE key.");
+                }
+                return LedgerEntryIdCmp{}(a.key(), b.key());
+            }
+        }
+    }
+
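+    // Orders ColdArchiveBucketEntries: METAENTRY first, then the lower
+    // boundary leaf; archived/deleted leaves sort by LedgerKey ahead of
+    // hash nodes, which sort by (level, index); the upper boundary leaf
+    // sorts last.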
+    bool
+    compareColdArchive(ColdArchiveBucketEntry const& a,
+                       ColdArchiveBucketEntry const& b) const
+    {
+        ColdArchiveBucketEntryType aty = a.type();
+        ColdArchiveBucketEntryType bty = b.type();
+
+        // METAENTRY sorts below all other entries, comes first in buckets.
+        if (aty == COLD_ARCHIVE_METAENTRY || bty == COLD_ARCHIVE_METAENTRY)
+        {
+            return aty < bty;
+        }
+
+        if (aty == COLD_ARCHIVE_BOUNDARY_LEAF)
+        {
+            if (bty == COLD_ARCHIVE_BOUNDARY_LEAF)
+            {
+                if (a.boundaryLeaf().isLowerBound ==
+                    b.boundaryLeaf().isLowerBound)
+                {
+                    throw std::runtime_error(
+                        "Malformed bucket: multiple identical boundaries");
+                }
+            }
+
+            return a.boundaryLeaf().isLowerBound;
+        }
+
+        if (bty == COLD_ARCHIVE_BOUNDARY_LEAF)
+        {
+            return !b.boundaryLeaf().isLowerBound;
+        }
+
+        if (aty == COLD_ARCHIVE_ARCHIVED_LEAF)
+        {
+            if (bty == COLD_ARCHIVE_ARCHIVED_LEAF)
+            {
+                return LedgerEntryIdCmp{}(a.archivedLeaf().archivedEntry.data,
+                                          b.archivedLeaf().archivedEntry.data);
+            }
+            else if (bty == COLD_ARCHIVE_DELETED_LEAF)
+            {
+                return LedgerEntryIdCmp{}(a.archivedLeaf().archivedEntry.data,
+                                          b.deletedLeaf().deletedKey);
+            }
+            else
+            {
+                // leaf nodes always before merkle nodes
+                return true;
+            }
+        }
+
+        if (bty == COLD_ARCHIVE_ARCHIVED_LEAF)
+        {
+            if (aty == COLD_ARCHIVE_DELETED_LEAF)
+            {
+                return LedgerEntryIdCmp{}(a.deletedLeaf().deletedKey,
+                                          b.archivedLeaf().archivedEntry.data);
+            }
+            else
+            {
+                // leaf nodes always before merkle nodes
+                return false;
+            }
+        }
+
+        if (aty == COLD_ARCHIVE_DELETED_LEAF)
+        {
+            if (bty == COLD_ARCHIVE_DELETED_LEAF)
+            {
+                return LedgerEntryIdCmp{}(a.deletedLeaf().deletedKey,
+                                          b.deletedLeaf().deletedKey);
+            }
+            else
+            {
+                // leaf nodes always before merkle nodes
+                return true;
+            }
+        }
+
+        if (bty == COLD_ARCHIVE_DELETED_LEAF)
+        {
+            // leaf nodes always before merkle nodes
+            return false;
+        }
+
+        releaseAssert(aty == COLD_ARCHIVE_HASH && bty == COLD_ARCHIVE_HASH);
+        if (a.hashEntry().level != b.hashEntry().level)
+        {
+            return a.hashEntry().level < b.hashEntry().level;
+        }
+
+        return a.hashEntry().index < b.hashEntry().index;
+    }
+
     bool
-    operator()(BucketEntry const& a, BucketEntry const& b) const
+    compareLive(BucketEntry const& a, BucketEntry const& b) const
     {
         BucketEntryType aty = a.type();
         BucketEntryType bty = b.type();
@@ -161,8 +324,8 @@ struct BucketEntryIdCmp
         {
             if (aty != DEADENTRY)
             {
-                throw std::runtime_error(
-                    "Malformed bucket: unexpected non-INIT/LIVE/DEAD entry.");
+                throw std::runtime_error("Malformed bucket: unexpected "
+                                         "non-INIT/LIVE/DEAD entry.");
             }
             if (bty == LIVEENTRY || bty == INITENTRY)
             {
@@ -179,5 +342,23 @@ struct BucketEntryIdCmp
             }
         }
     }
+
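+    // Dispatch to the comparator matching BucketT at compile time.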
+    bool
+    operator()(BucketEntryT const& a, BucketEntryT const& b) const
+    {
+        if constexpr (std::is_same_v<BucketT, LiveBucket>)
+        {
+            return compareLive(a, b);
+        }
+        else if constexpr (std::is_same_v<BucketT, HotArchiveBucket>)
+        {
+            return compareHotArchive(a, b);
+        }
+        else
+        {
+            static_assert(std::is_same_v<BucketT, ColdArchiveBucket>);
+            return compareColdArchive(a, b);
+        }
+    }
 };
 }
diff --git a/src/bucket/MergeKey.cpp b/src/bucket/MergeKey.cpp
index 74fc5993fb..f3932195f0 100644
--- a/src/bucket/MergeKey.cpp
+++ b/src/bucket/MergeKey.cpp
@@ -10,25 +10,19 @@
 namespace stellar
 {
 
-MergeKey::MergeKey(bool keepDeadEntries,
-                   std::shared_ptr<Bucket> const& inputCurr,
-                   std::shared_ptr<Bucket> const& inputSnap,
-                   std::vector<std::shared_ptr<Bucket>> const& inputShadows)
-    : mKeepDeadEntries(keepDeadEntries)
-    , mInputCurrBucket(inputCurr->getHash())
-    , mInputSnapBucket(inputSnap->getHash())
+MergeKey::MergeKey(bool keepTombstoneEntries, Hash const& currHash,
+                   Hash const& snapHash, std::vector<Hash> const& shadowHashes)
+    : mKeepTombstoneEntries(keepTombstoneEntries)
+    , mInputCurrBucket(currHash)
+    , mInputSnapBucket(snapHash)
+    , mInputShadowBuckets(shadowHashes)
 {
-    mInputShadowBuckets.reserve(inputShadows.size());
-    for (auto const& s : inputShadows)
-    {
-        mInputShadowBuckets.emplace_back(s->getHash());
-    }
 }
 
 bool
 MergeKey::operator==(MergeKey const& other) const
 {
-    return mKeepDeadEntries == other.mKeepDeadEntries &&
+    return mKeepTombstoneEntries == other.mKeepTombstoneEntries &&
            mInputCurrBucket == other.mInputCurrBucket &&
            mInputSnapBucket == other.mInputSnapBucket &&
            mInputShadowBuckets == other.mInputShadowBuckets;
@@ -49,7 +43,7 @@ operator<<(std::ostream& out, MergeKey const& b)
         first = false;
         out << hexAbbrev(s);
     }
-    out << fmt::format(FMT_STRING("], keep={}]"), b.mKeepDeadEntries);
+    out << fmt::format(FMT_STRING("], keep={}]"), b.mKeepTombstoneEntries);
     return out;
 }
 
@@ -68,7 +62,7 @@ size_t
 hash<stellar::MergeKey>::operator()(stellar::MergeKey const& key) const noexcept
 {
     std::ostringstream oss;
-    oss << key.mKeepDeadEntries << ','
+    oss << key.mKeepTombstoneEntries << ','
         << stellar::binToHex(key.mInputCurrBucket) << ','
         << stellar::binToHex(key.mInputSnapBucket);
     for (auto const& e : key.mInputShadowBuckets)
diff --git a/src/bucket/MergeKey.h b/src/bucket/MergeKey.h
index e9098f26ac..d33a73672b 100644
--- a/src/bucket/MergeKey.h
+++ b/src/bucket/MergeKey.h
@@ -17,11 +17,10 @@ namespace stellar
 // pre-resolved std::shared_future containing that output.
 struct MergeKey
 {
-    MergeKey(bool keepDeadEntries, std::shared_ptr<Bucket> const& inputCurr,
-             std::shared_ptr<Bucket> const& inputSnap,
-             std::vector<std::shared_ptr<Bucket>> const& inputShadows);
+    MergeKey(bool keepTombstoneEntries, Hash const& currHash,
+             Hash const& snapHash, std::vector<Hash> const& shadowHashes);
 
-    bool mKeepDeadEntries;
+    bool mKeepTombstoneEntries;
     Hash mInputCurrBucket;
     Hash mInputSnapBucket;
     std::vector<Hash> mInputShadowBuckets;
diff --git a/src/bucket/readme.md b/src/bucket/readme.md
index 29d4b81bd8..34439828e7 100644
--- a/src/bucket/readme.md
+++ b/src/bucket/readme.md
@@ -83,8 +83,6 @@ for smaller memory overhead.
 Because `BucketIndex`es must be in memory, there is a tradeoff between BucketList
 lookup speed and memory overhead. The following configuration flags control these options:
 
-- `DEPRECATED_SQL_LEDGER_STATE`
-  - When set to false, the `BucketList` is indexed and used for ledger entry lookup
 - `BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT`
   - Page size used for `RangeIndex`, where `pageSize ==
     2^BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT`.
diff --git a/src/bucket/test/BucketIndexTests.cpp b/src/bucket/test/BucketIndexTests.cpp
index 0a045e7762..28f7ea903c 100644
--- a/src/bucket/test/BucketIndexTests.cpp
+++ b/src/bucket/test/BucketIndexTests.cpp
@@ -16,6 +16,12 @@
 #include "main/Config.h"
 #include "test/test.h"
 
+#include "util/ProtocolVersion.h"
+#include "util/UnorderedMap.h"
+#include "util/UnorderedSet.h"
+#include "util/XDRCereal.h"
+#include "util/types.h"
+
 using namespace stellar;
 using namespace BucketTestUtils;
 
@@ -69,7 +75,7 @@ class BucketIndexTest
                     {CONFIG_SETTING}, 10);
             f(entries);
             closeLedger(*mApp);
-        } while (!BucketList::levelShouldSpill(ledger, mLevelsToBuild - 1));
+        } while (!LiveBucketList::levelShouldSpill(ledger, mLevelsToBuild - 1));
     }
 
   public:
@@ -129,7 +135,7 @@ class BucketIndexTest
 
         auto searchableBL = getBM()
                                 .getBucketSnapshotManager()
-                                .copySearchableBucketListSnapshot();
+                                .copySearchableLiveBucketListSnapshot();
         auto lk = LedgerEntryKey(canonicalEntry);
 
         auto currentLoadedEntry = searchableBL->load(lk);
@@ -143,20 +149,18 @@ class BucketIndexTest
 
         for (uint32_t currLedger = ledger; currLedger > 0; --currLedger)
         {
-            auto [loadRes, snapshotExists] =
-                searchableBL->loadKeysFromLedger({lk}, currLedger);
+            auto loadRes = searchableBL->loadKeysFromLedger({lk}, currLedger);
 
             // If we query an older snapshot, load should return no result
             if (currLedger < ledger - mApp->getConfig().QUERY_SNAPSHOT_LEDGERS)
             {
-                REQUIRE(!snapshotExists);
-                REQUIRE(loadRes.empty());
+                REQUIRE(!loadRes);
             }
             else
             {
-                REQUIRE(snapshotExists);
-                REQUIRE(loadRes.size() == 1);
-                REQUIRE(loadRes[0].lastModifiedLedgerSeq == currLedger - 1);
+                REQUIRE(loadRes);
+                REQUIRE(loadRes->size() == 1);
+                REQUIRE(loadRes->at(0).lastModifiedLedgerSeq == currLedger - 1);
             }
         }
     }
@@ -250,7 +254,7 @@ class BucketIndexTest
     {
         auto searchableBL = getBM()
                                 .getBucketSnapshotManager()
-                                .copySearchableBucketListSnapshot();
+                                .copySearchableLiveBucketListSnapshot();
 
         // Test bulk load lookup
         auto loadResult =
@@ -277,7 +281,7 @@ class BucketIndexTest
     {
         auto searchableBL = getBM()
                                 .getBucketSnapshotManager()
-                                .copySearchableBucketListSnapshot();
+                                .copySearchableLiveBucketListSnapshot();
         for (size_t i = 0; i < n; ++i)
         {
             LedgerKeySet searchSubset;
@@ -317,7 +321,7 @@ class BucketIndexTest
     {
         auto searchableBL = getBM()
                                 .getBucketSnapshotManager()
-                                .copySearchableBucketListSnapshot();
+                                .copySearchableLiveBucketListSnapshot();
 
         // Load should return empty vector for keys not in bucket list
         auto keysNotInBL =
@@ -494,7 +498,7 @@ class BucketIndexPoolShareTest : public BucketIndexTest
     {
         auto searchableBL = getBM()
                                 .getBucketSnapshotManager()
-                                .copySearchableBucketListSnapshot();
+                                .copySearchableLiveBucketListSnapshot();
         auto loadResult =
             searchableBL->loadPoolShareTrustLinesByAccountAndAsset(
                 mAccountToSearch.accountID, mAssetToSearch);
@@ -508,7 +512,6 @@ testAllIndexTypes(std::function<void(Config&)> f)
     SECTION("individual index only")
     {
         Config cfg(getTestConfig());
-        cfg.DEPRECATED_SQL_LEDGER_STATE = false;
         cfg.BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT = 0;
         f(cfg);
     }
@@ -516,7 +519,6 @@ testAllIndexTypes(std::function<void(Config&)> f)
     SECTION("individual and range index")
     {
         Config cfg(getTestConfig());
-        cfg.DEPRECATED_SQL_LEDGER_STATE = false;
 
         // First 3 levels individual, last 3 range index
         cfg.BUCKETLIST_DB_INDEX_CUTOFF = 1;
@@ -526,7 +528,6 @@ testAllIndexTypes(std::function<void(Config&)> f)
     SECTION("range index only")
     {
         Config cfg(getTestConfig());
-        cfg.DEPRECATED_SQL_LEDGER_STATE = false;
         cfg.BUCKETLIST_DB_INDEX_CUTOFF = 0;
         f(cfg);
     }
@@ -608,7 +609,6 @@ TEST_CASE("serialize bucket indexes", "[bucket][bucketindex]")
 
     // All levels use range config
     cfg.BUCKETLIST_DB_INDEX_CUTOFF = 0;
-    cfg.DEPRECATED_SQL_LEDGER_STATE = false;
     cfg.BUCKETLIST_DB_PERSIST_INDEX = true;
     cfg.INVARIANT_CHECKS = {};
 
@@ -631,7 +631,7 @@ TEST_CASE("serialize bucket indexes", "[bucket][bucketindex]")
         auto indexFilename = test.getBM().bucketIndexFilename(bucketHash);
         REQUIRE(fs::exists(indexFilename));
 
-        auto b = test.getBM().getBucketByHash(bucketHash);
+        auto b = test.getBM().getLiveBucketByHash(bucketHash);
         REQUIRE(b->isIndexed());
 
         auto onDiskIndex =
@@ -657,7 +657,7 @@ TEST_CASE("serialize bucket indexes", "[bucket][bucketindex]")
         }
 
         // Check if in-memory index has correct params
-        auto b = test.getBM().getBucketByHash(bucketHash);
+        auto b = test.getBM().getLiveBucketByHash(bucketHash);
         REQUIRE(!b->isEmpty());
         REQUIRE(b->isIndexed());
 
@@ -682,4 +682,201 @@ TEST_CASE("serialize bucket indexes", "[bucket][bucketindex]")
         REQUIRE((inMemoryIndex == *onDiskIndex));
     }
 }
+
+// The majority of BucketListDB functionality is shared by all BucketList
+// types. This test is a simple sanity check that exercises the interface
+// differences between the live BucketList and the hot archive BucketList.
+TEST_CASE("hot archive bucket lookups", "[bucket][bucketindex][archive]")
+{
+    auto f = [&](Config& cfg) {
+        auto clock = VirtualClock();
+        auto app = createTestApplication<BucketTestApplication>(clock, cfg);
+
+        UnorderedMap<LedgerKey, LedgerEntry> expectedArchiveEntries;
+        UnorderedSet<LedgerKey> expectedDeletedEntries;
+        UnorderedSet<LedgerKey> expectedRestoredEntries;
+        UnorderedSet<LedgerKey> keysToSearch;
+
+        auto ledger = 1;
+
+        // Use snapshot across ledger to test update behavior
+        auto searchableBL = app->getBucketManager()
+                                .getBucketSnapshotManager()
+                                .copySearchableHotArchiveBucketListSnapshot();
+
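+        // Verify a point-load result against the expected archive state.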
+        auto checkLoad = [&](LedgerKey const& k,
+                             std::shared_ptr<HotArchiveBucketEntry> entryPtr) {
+            // Restored entries should be null
+            if (expectedRestoredEntries.find(k) !=
+                expectedRestoredEntries.end())
+            {
+                REQUIRE(!entryPtr);
+            }
+
+            // Deleted entries should be HotArchiveBucketEntry of type
+            // DELETED
+            else if (expectedDeletedEntries.find(k) !=
+                     expectedDeletedEntries.end())
+            {
+                REQUIRE(entryPtr);
+                REQUIRE(entryPtr->type() ==
+                        HotArchiveBucketEntryType::HOT_ARCHIVE_DELETED);
+                REQUIRE(entryPtr->key() == k);
+            }
+
+            // Archived entries should contain full LedgerEntry
+            else
+            {
+                auto expectedIter = expectedArchiveEntries.find(k);
+                REQUIRE(expectedIter != expectedArchiveEntries.end());
+                REQUIRE(entryPtr);
+                REQUIRE(entryPtr->type() ==
+                        HotArchiveBucketEntryType::HOT_ARCHIVE_ARCHIVED);
+                REQUIRE(entryPtr->archivedEntry() == expectedIter->second);
+            }
+        };
+
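+        // Exercise both point loads and bulk loads, consuming the expected
+        // sets as entries are found.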
+        auto checkResult = [&] {
+            LedgerKeySet bulkLoadKeys;
+            for (auto const& k : keysToSearch)
+            {
+                auto entryPtr = searchableBL->load(k);
+                checkLoad(k, entryPtr);
+                bulkLoadKeys.emplace(k);
+            }
+
+            auto bulkLoadResult = searchableBL->loadKeys(bulkLoadKeys);
+            for (auto entry : bulkLoadResult)
+            {
+                if (entry.type() == HOT_ARCHIVE_DELETED)
+                {
+                    auto k = entry.key();
+                    auto iter = expectedDeletedEntries.find(k);
+                    REQUIRE(iter != expectedDeletedEntries.end());
+                    expectedDeletedEntries.erase(iter);
+                }
+                else
+                {
+                    REQUIRE(entry.type() == HOT_ARCHIVE_ARCHIVED);
+                    auto le = entry.archivedEntry();
+                    auto k = LedgerEntryKey(le);
+                    auto iter = expectedArchiveEntries.find(k);
+                    REQUIRE(iter != expectedArchiveEntries.end());
+                    REQUIRE(iter->second == le);
+                    expectedArchiveEntries.erase(iter);
+                }
+            }
+
+            REQUIRE(expectedDeletedEntries.empty());
+            REQUIRE(expectedArchiveEntries.empty());
+        };
+
+        auto archivedEntries =
+            LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes(
+                {CONTRACT_DATA, CONTRACT_CODE}, 10);
+        for (auto const& e : archivedEntries)
+        {
+            auto k = LedgerEntryKey(e);
+            expectedArchiveEntries.emplace(k, e);
+            keysToSearch.emplace(k);
+        }
+
+        // Note: keysToSearch is automatically populated by these functions
+        auto deletedEntries =
+            LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes(
+                {CONTRACT_DATA, CONTRACT_CODE}, 10, keysToSearch);
+        for (auto const& k : deletedEntries)
+        {
+            expectedDeletedEntries.emplace(k);
+        }
+
+        auto restoredEntries =
+            LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes(
+                {CONTRACT_DATA, CONTRACT_CODE}, 10, keysToSearch);
+        for (auto const& k : restoredEntries)
+        {
+            expectedRestoredEntries.emplace(k);
+        }
+
+        auto header =
+            app->getLedgerManager().getLastClosedLedgerHeader().header;
+        header.ledgerSeq += 1;
+        header.ledgerVersion = static_cast<uint32_t>(
+            Bucket::FIRST_PROTOCOL_SUPPORTING_PERSISTENT_EVICTION);
+        addHotArchiveBatchAndUpdateSnapshot(*app, header, archivedEntries,
+                                            restoredEntries, deletedEntries);
+        checkResult();
+
+        // Add a few batches so that entries are no longer in the top bucket
+        for (auto i = 0; i < 100; ++i)
+        {
+            header.ledgerSeq += 1;
+            addHotArchiveBatchAndUpdateSnapshot(*app, header, {}, {}, {});
+        }
+
+        // Shadow entries via liveEntry
+        auto liveShadow1 = LedgerEntryKey(archivedEntries[0]);
+        auto liveShadow2 = deletedEntries[1];
+
+        header.ledgerSeq += 1;
+        addHotArchiveBatchAndUpdateSnapshot(*app, header, {},
+                                            {liveShadow1, liveShadow2}, {});
+
+        // Point load
+        for (auto const& k : {liveShadow1, liveShadow2})
+        {
+            auto entryPtr = searchableBL->load(k);
+            REQUIRE(!entryPtr);
+        }
+
+        // Bulk load
+        auto bulkLoadResult =
+            searchableBL->loadKeys({liveShadow1, liveShadow2});
+        REQUIRE(bulkLoadResult.size() == 0);
+
+        // Shadow via deletedEntry
+        auto deletedShadow = LedgerEntryKey(archivedEntries[1]);
+
+        header.ledgerSeq += 1;
+        addHotArchiveBatchAndUpdateSnapshot(*app, header, {}, {},
+                                            {deletedShadow});
+
+        // Point load
+        auto entryPtr = searchableBL->load(deletedShadow);
+        REQUIRE(entryPtr);
+        REQUIRE(entryPtr->type() ==
+                HotArchiveBucketEntryType::HOT_ARCHIVE_DELETED);
+        REQUIRE(entryPtr->key() == deletedShadow);
+
+        // Bulk load
+        auto bulkLoadResult2 = searchableBL->loadKeys({deletedShadow});
+        REQUIRE(bulkLoadResult2.size() == 1);
+        REQUIRE(bulkLoadResult2[0].type() == HOT_ARCHIVE_DELETED);
+        REQUIRE(bulkLoadResult2[0].key() == deletedShadow);
+
+        // Shadow via archivedEntry
+        auto archivedShadow = archivedEntries[3];
+        archivedShadow.lastModifiedLedgerSeq = ledger;
+
+        header.ledgerSeq += 1;
+        addHotArchiveBatchAndUpdateSnapshot(*app, header, {archivedShadow}, {},
+                                            {});
+
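+        // The re-archived entry shadows its original version, so lookups
+        // should return the updated HOT_ARCHIVE_ARCHIVED entry.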
+        // Point load
+        entryPtr = searchableBL->load(LedgerEntryKey(archivedShadow));
+        REQUIRE(entryPtr);
+        REQUIRE(entryPtr->type() ==
+                HotArchiveBucketEntryType::HOT_ARCHIVE_ARCHIVED);
+        REQUIRE(entryPtr->archivedEntry() == archivedShadow);
+
+        // Bulk load
+        auto bulkLoadResult3 =
+            searchableBL->loadKeys({LedgerEntryKey(archivedShadow)});
+        REQUIRE(bulkLoadResult3.size() == 1);
+        REQUIRE(bulkLoadResult3[0].type() == HOT_ARCHIVE_ARCHIVED);
+        REQUIRE(bulkLoadResult3[0].archivedEntry() == archivedShadow);
+    };
+
+    testAllIndexTypes(f);
+}
 }
diff --git a/src/bucket/test/BucketListTests.cpp b/src/bucket/test/BucketListTests.cpp
index 2730efcca6..fb086cc1b9 100644
--- a/src/bucket/test/BucketListTests.cpp
+++ b/src/bucket/test/BucketListTests.cpp
@@ -26,9 +26,13 @@
 #include "main/Config.h"
 #include "test/TestUtils.h"
 #include "test/test.h"
+#include "util/Fs.h"
 #include "util/Math.h"
 #include "util/ProtocolVersion.h"
 #include "util/Timer.h"
+#include "util/UnorderedMap.h"
+#include "util/UnorderedSet.h"
+#include "xdr/Stellar-ledger.h"
 #include "xdrpp/autocheck.h"
 
 #include <deque>
@@ -67,29 +71,29 @@ highBoundInclusive(uint32_t level, uint32_t ledger)
 }
 
 void
-checkBucketSizeAndBounds(BucketList& bl, uint32_t ledgerSeq, uint32_t level,
+checkBucketSizeAndBounds(LiveBucketList& bl, uint32_t ledgerSeq, uint32_t level,
                          bool isCurr)
 {
-    std::shared_ptr<Bucket> bucket;
+    std::shared_ptr<LiveBucket> bucket;
     uint32_t sizeOfBucket = 0;
     uint32_t oldestLedger = 0;
     if (isCurr)
     {
         bucket = bl.getLevel(level).getCurr();
-        sizeOfBucket = BucketList::sizeOfCurr(ledgerSeq, level);
-        oldestLedger = BucketList::oldestLedgerInCurr(ledgerSeq, level);
+        sizeOfBucket = LiveBucketList::sizeOfCurr(ledgerSeq, level);
+        oldestLedger = LiveBucketList::oldestLedgerInCurr(ledgerSeq, level);
     }
     else
     {
         bucket = bl.getLevel(level).getSnap();
-        sizeOfBucket = BucketList::sizeOfSnap(ledgerSeq, level);
-        oldestLedger = BucketList::oldestLedgerInSnap(ledgerSeq, level);
+        sizeOfBucket = LiveBucketList::sizeOfSnap(ledgerSeq, level);
+        oldestLedger = LiveBucketList::oldestLedgerInSnap(ledgerSeq, level);
     }
 
     std::set<uint32_t> ledgers;
     uint32_t lbound = std::numeric_limits<uint32_t>::max();
     uint32_t ubound = 0;
-    for (BucketInputIterator iter(bucket); iter; ++iter)
+    for (LiveBucketInputIterator iter(bucket); iter; ++iter)
     {
         auto lastModified = (*iter).liveEntry().lastModifiedLedgerSeq;
         ledgers.insert(lastModified);
@@ -129,66 +133,104 @@ binarySearchForLedger(uint32_t lbound, uint32_t ubound,
 
 using namespace BucketListTests;
 
-TEST_CASE_VERSIONS("bucket list", "[bucket][bucketlist]")
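+// Adds batches of generated entries to a fresh bucket list and checks that
+// each level's curr and snap buckets stay within their expected size bounds.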
+template <class BucketListT>
+static void
+basicBucketListTest()
 {
     VirtualClock clock;
     Config const& cfg = getTestConfig();
-    try
-    {
-        for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) {
+
+    auto test = [&](Config const& cfg) {
+        try
+        {
             Application::pointer app = createTestApplication(clock, cfg);
-            BucketList bl;
+            BucketListT bl;
             CLOG_DEBUG(Bucket, "Adding batches to bucket list");
+
+            UnorderedSet<LedgerKey> seenKeys;
             for (uint32_t i = 1;
                  !app->getClock().getIOContext().stopped() && i < 130; ++i)
             {
                 app->getClock().crank(false);
-                auto lh =
-                    app->getLedgerManager().getLastClosedLedgerHeader().header;
-                lh.ledgerSeq = i;
-                addBatchAndUpdateSnapshot(
-                    bl, *app, lh, {},
-                    LedgerTestUtils::
-                        generateValidUniqueLedgerEntriesWithExclusions(
-                            {CONFIG_SETTING}, 8),
-                    LedgerTestUtils::generateValidLedgerEntryKeysWithExclusions(
-                        {CONFIG_SETTING}, 5));
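+                // The two list types take different batch shapes: live lists
+                // take (init, live, dead) entries/keys, while hot archive
+                // lists take (archived, restored, deleted) entries/keys.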
+                if constexpr (std::is_same_v<BucketListT, LiveBucketList>)
+                {
+                    bl.addBatch(
+                        *app, i, getAppLedgerVersion(app), {},
+                        LedgerTestUtils::generateValidUniqueLedgerEntries(8),
+                        LedgerTestUtils::
+                            generateValidLedgerEntryKeysWithExclusions(
+                                {CONFIG_SETTING}, 5));
+                }
+                else
+                {
+                    bl.addBatch(
+                        *app, i, getAppLedgerVersion(app), {},
+                        LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes(
+                            {CONTRACT_CODE, CONTRACT_DATA}, 8, seenKeys),
+                        LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes(
+                            {CONTRACT_CODE, CONTRACT_DATA}, 5, seenKeys));
+                }
+
                 if (i % 10 == 0)
                     CLOG_DEBUG(Bucket, "Added batch {}, hash={}", i,
                                binToHex(bl.getHash()));
-                for (uint32_t j = 0; j < BucketList::kNumLevels; ++j)
+                for (uint32_t j = 0; j < BucketListT::kNumLevels; ++j)
                 {
                     auto const& lev = bl.getLevel(j);
                     auto currSz = countEntries(lev.getCurr());
                     auto snapSz = countEntries(lev.getSnap());
-                    CHECK(currSz <= BucketList::levelHalf(j) * 100);
-                    CHECK(snapSz <= BucketList::levelHalf(j) * 100);
+                    CHECK(currSz <= BucketListT::levelHalf(j) * 100);
+                    CHECK(snapSz <= BucketListT::levelHalf(j) * 100);
                 }
             }
-        });
+        }
+        catch (std::future_error& e)
+        {
+            CLOG_DEBUG(Bucket, "Test caught std::future_error {}: {}",
+                       e.code().value(), e.what());
+            REQUIRE(false);
+        }
+    };
+
+    if constexpr (std::is_same_v<BucketListT, LiveBucketList>)
+    {
+        for_versions_with_differing_bucket_logic(cfg, test);
     }
-    catch (std::future_error& e)
+    else
     {
-        CLOG_DEBUG(Bucket, "Test caught std::future_error {}: {}",
-                   e.code().value(), e.what());
-        REQUIRE(false);
+        for_versions_from(23, cfg, test);
     }
 }
 
-TEST_CASE("bucketUpdatePeriod arithmetic", "[bucket][bucketlist]")
+TEST_CASE_VERSIONS("bucket list", "[bucket][bucketlist]")
+{
+    SECTION("live bl")
+    {
+        basicBucketListTest<LiveBucketList>();
+    }
+
+    SECTION("hot archive bl")
+    {
+        basicBucketListTest<HotArchiveBucketList>();
+    }
+}
+
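+// Computes the expected update period of every level's curr and snap buckets,
+// then walks ledger sequence numbers to check that each bucket changes
+// exactly when bucketUpdatePeriod predicts.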
+template <class BucketListT>
+static void
+updatePeriodTest()
 {
     std::map<uint32_t, uint32_t> currCalculatedUpdatePeriods;
     std::map<uint32_t, uint32_t> snapCalculatedUpdatePeriods;
-    for (uint32_t i = 0; i < BucketList::kNumLevels; ++i)
+    for (uint32_t i = 0; i < BucketListT::kNumLevels; ++i)
     {
         currCalculatedUpdatePeriods.emplace(
-            i, BucketList::bucketUpdatePeriod(i, /*isCurr=*/true));
+            i, BucketListT::bucketUpdatePeriod(i, /*isCurr=*/true));
 
         // Last level has no snap
-        if (i != BucketList::kNumLevels - 1)
+        if (i != BucketListT::kNumLevels - 1)
         {
             snapCalculatedUpdatePeriods.emplace(
-                i, BucketList::bucketUpdatePeriod(i, /*isSnap=*/false));
+                i, BucketListT::bucketUpdatePeriod(i, /*isCurr=*/false));
         }
     }
 
@@ -197,7 +239,7 @@ TEST_CASE("bucketUpdatePeriod arithmetic", "[bucket][bucketlist]")
                                  !snapCalculatedUpdatePeriods.empty();
          ++ledgerSeq)
     {
-        for (uint32_t level = 0; level < BucketList::kNumLevels; ++level)
+        for (uint32_t level = 0; level < BucketListT::kNumLevels; ++level)
         {
             // Check if curr bucket is updated
             auto currIter = currCalculatedUpdatePeriods.find(level);
@@ -213,7 +255,7 @@ TEST_CASE("bucketUpdatePeriod arithmetic", "[bucket][bucketlist]")
                 {
                     // For all other levels, an update occurs when the level
                     // above spills
-                    if (BucketList::levelShouldSpill(ledgerSeq, level - 1))
+                    if (BucketListT::levelShouldSpill(ledgerSeq, level - 1))
                     {
                         REQUIRE(currIter->second == ledgerSeq);
                         currCalculatedUpdatePeriods.erase(currIter);
@@ -225,7 +267,7 @@ TEST_CASE("bucketUpdatePeriod arithmetic", "[bucket][bucketlist]")
             auto snapIter = snapCalculatedUpdatePeriods.find(level);
             if (snapIter != snapCalculatedUpdatePeriods.end())
             {
-                if (BucketList::levelShouldSpill(ledgerSeq, level))
+                if (BucketListT::levelShouldSpill(ledgerSeq, level))
                 {
                     // Check that snap bucket calculation is correct
                     REQUIRE(snapIter->second == ledgerSeq);
@@ -236,6 +278,19 @@ TEST_CASE("bucketUpdatePeriod arithmetic", "[bucket][bucketlist]")
     }
 }
 
+TEST_CASE("bucketUpdatePeriod arithmetic", "[bucket][bucketlist]")
+{
+    SECTION("live bl")
+    {
+        updatePeriodTest<LiveBucketList>();
+    }
+
+    SECTION("hot archive bl")
+    {
+        updatePeriodTest<HotArchiveBucketList>();
+    }
+}
+
 TEST_CASE_VERSIONS("bucket list shadowing pre/post proto 12",
                    "[bucket][bucketlist]")
 {
@@ -243,7 +298,7 @@ TEST_CASE_VERSIONS("bucket list shadowing pre/post proto 12",
     Config const& cfg = getTestConfig();
     for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) {
         Application::pointer app = createTestApplication(clock, cfg);
-        BucketList bl;
+        LiveBucketList bl;
 
         // Alice and Bob change in every iteration.
         auto alice = LedgerTestUtils::generateValidAccountEntry(5);
@@ -258,8 +313,7 @@ TEST_CASE_VERSIONS("bucket list shadowing pre/post proto 12",
         {
             app->getClock().crank(false);
             auto liveBatch =
-                LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions(
-                    {CONFIG_SETTING}, 5);
+                LedgerTestUtils::generateValidUniqueLedgerEntries(5);
 
             BucketEntry BucketEntryAlice, BucketEntryBob;
             alice.balance++;
@@ -274,11 +328,8 @@ TEST_CASE_VERSIONS("bucket list shadowing pre/post proto 12",
             BucketEntryBob.liveEntry().data.account() = bob;
             liveBatch.push_back(BucketEntryBob.liveEntry());
 
-            auto lh =
-                app->getLedgerManager().getLastClosedLedgerHeader().header;
-            lh.ledgerSeq = i;
-            addBatchAndUpdateSnapshot(
-                bl, *app, lh, {}, liveBatch,
+            bl.addBatch(
+                *app, i, getAppLedgerVersion(app), {}, liveBatch,
                 LedgerTestUtils::generateValidLedgerEntryKeysWithExclusions(
                     {CONFIG_SETTING}, 5));
             if (i % 100 == 0)
@@ -304,7 +355,7 @@ TEST_CASE_VERSIONS("bucket list shadowing pre/post proto 12",
 
                 // Alice and Bob should never occur in level 2 .. N because they
                 // were shadowed in level 0 continuously.
-                for (uint32_t j = 2; j < BucketList::kNumLevels; ++j)
+                for (uint32_t j = 2; j < LiveBucketList::kNumLevels; ++j)
                 {
                     auto const& lev = bl.getLevel(j);
                     auto curr = lev.getCurr();
@@ -317,7 +368,7 @@ TEST_CASE_VERSIONS("bucket list shadowing pre/post proto 12",
                          snap->containsBucketIdentity(BucketEntryBob));
                     if (protocolVersionIsBefore(
                             app->getConfig().LEDGER_PROTOCOL_VERSION,
-                            Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED) ||
+                            LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED) ||
                         j > 5)
                     {
                         CHECK(!hasAlice);
@@ -337,7 +388,240 @@ TEST_CASE_VERSIONS("bucket list shadowing pre/post proto 12",
     });
 }
 
-TEST_CASE_VERSIONS("bucket tombstones expire at bottom level",
+TEST_CASE("hot archive bucketlist merges in ColdArchive bucket",
+          "[bucket][bucketlist][archival]")
+{
+    VirtualClock clock;
+    Config const& cfg = getTestConfig();
+    testutil::BucketListDepthModifier<HotArchiveBucket> bldm(6);
+    auto app = createTestApplication(clock, cfg);
+    HotArchiveBucketList bl;
+
+    auto lastBucketSize = [&] {
+        auto& level = bl.getLevel(HotArchiveBucketList::kNumLevels - 1);
+        return countEntries(level.getCurr());
+    };
+
+    // Populate a full BucketList
+    UnorderedSet<LedgerKey> keys;
+    UnorderedSet<LedgerKey> expectedDeadKeys;
+    UnorderedMap<LedgerKey, LedgerEntry> expectedArchivedEntries;
+
+    auto ledger = 1;
+    while (lastBucketSize() == 0)
+    {
+        auto deletedKeys =
+            LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes(
+                {CONTRACT_CODE, CONTRACT_DATA}, 5, keys);
+        auto archivedEntries =
+            LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes(
+                {CONTRACT_CODE, CONTRACT_DATA}, 5, keys);
+        auto restoredKeys =
+            LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes(
+                {CONTRACT_CODE, CONTRACT_DATA}, 5, keys);
+
+        // Randomly shadow some earlier entries: recreate one previously
+        // deleted key and delete one previously archived entry, so both are
+        // removed from the expected sets.
+        if (ledger > 1)
+        {
+            auto deadIter = expectedDeadKeys.begin();
+            std::advance(deadIter,
+                         rand_uniform<size_t>(0, expectedDeadKeys.size() - 1));
+
+            auto shadowWithRecreation = *deadIter;
+            expectedDeadKeys.erase(deadIter);
+
+            auto archivedIter = expectedArchivedEntries.begin();
+            std::advance(
+                archivedIter,
+                rand_uniform<size_t>(0, expectedArchivedEntries.size() - 1));
+            auto shadowWithDelete = archivedIter->first;
+            expectedArchivedEntries.erase(archivedIter);
+
+            deletedKeys.emplace_back(shadowWithDelete);
+            restoredKeys.emplace_back(shadowWithRecreation);
+        }
+
+        bl.addBatch(*app, ledger, getAppLedgerVersion(app), archivedEntries,
+                    restoredKeys, deletedKeys);
+
+        for (auto const& key : deletedKeys)
+        {
+            expectedDeadKeys.insert(key);
+        }
+
+        for (auto const& entry : archivedEntries)
+        {
+            expectedArchivedEntries.emplace(LedgerEntryKey(entry), entry);
+        }
+
+        ++ledger;
+    }
+
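+    // Verify the merged cold archive bucket: a lower boundary leaf comes
+    // first, then every expected archived/deleted leaf with consecutive
+    // indices, and an upper boundary leaf closes the bucket.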
+    auto testBucket = [&](auto coldBucket) {
+        EntryCounts counts(coldBucket);
+        REQUIRE(counts.nDead == expectedDeadKeys.size());
+        REQUIRE(counts.nInitOrArchived == expectedArchivedEntries.size());
+
+        // One meta entry plus the lower and upper boundary leaves
+        REQUIRE(counts.nMeta == 3);
+
+        bool seenLowerBound = false;
+        bool seenUpperBound = false;
+        uint32_t currIndex = 0;
+        for (ColdArchiveBucketInputIterator iter(coldBucket); iter; ++iter)
+        {
+            auto be = *iter;
+            if (!seenLowerBound)
+            {
+                REQUIRE(be.type() == stellar::COLD_ARCHIVE_BOUNDARY_LEAF);
+                REQUIRE(be.boundaryLeaf().isLowerBound);
+                REQUIRE(be.boundaryLeaf().index == currIndex);
+                seenLowerBound = true;
+            }
+            // If we've seen the lower bound and there are no more expected
+            // keys, we should see the upper bound
+            else if (expectedArchivedEntries.empty() &&
+                     expectedDeadKeys.empty())
+            {
+                REQUIRE(!seenUpperBound);
+                REQUIRE(be.type() == stellar::COLD_ARCHIVE_BOUNDARY_LEAF);
+                REQUIRE(!be.boundaryLeaf().isLowerBound);
+                REQUIRE(be.boundaryLeaf().index == currIndex);
+                seenUpperBound = true;
+            }
+            else
+            {
+                if (be.type() == COLD_ARCHIVE_DELETED_LEAF)
+                {
+                    REQUIRE(
+                        expectedDeadKeys.find(be.deletedLeaf().deletedKey) !=
+                        expectedDeadKeys.end());
+                    REQUIRE(be.deletedLeaf().index == currIndex);
+                    expectedDeadKeys.erase(be.deletedLeaf().deletedKey);
+                }
+                else
+                {
+                    REQUIRE(be.type() == COLD_ARCHIVE_ARCHIVED_LEAF);
+                    auto expectedIter = expectedArchivedEntries.find(
+                        LedgerEntryKey(be.archivedLeaf().archivedEntry));
+                    REQUIRE(expectedIter != expectedArchivedEntries.end());
+                    REQUIRE(expectedIter->second ==
+                            be.archivedLeaf().archivedEntry);
+                    REQUIRE(be.archivedLeaf().index == currIndex);
+                    expectedArchivedEntries.erase(
+                        LedgerEntryKey(be.archivedLeaf().archivedEntry));
+                }
+            }
+
+            ++currIndex;
+        }
+
+        REQUIRE(expectedDeadKeys.empty());
+        REQUIRE(expectedArchivedEntries.empty());
+        REQUIRE(seenLowerBound);
+        REQUIRE(seenUpperBound);
+    };
+
+    SECTION("merge")
+    {
+        PendingColdArchive pending(*app, bl, 0, cfg.LEDGER_PROTOCOL_VERSION);
+        auto coldBucket = pending.resolve();
+        testBucket(coldBucket);
+    }
+
+    SECTION("reattach to existing bucket")
+    {
+        // First create the bucket
+        {
+            PendingColdArchive pending(*app, bl, 10,
+                                       cfg.LEDGER_PROTOCOL_VERSION);
+            auto coldBucket = pending.resolve();
+            REQUIRE(fs::exists(coldBucket->getFilename()));
+        }
+
+        SECTION("bucket in memory")
+        {
+            PendingColdArchive pending(*app, bl, 10,
+                                       cfg.LEDGER_PROTOCOL_VERSION);
+            auto coldBucket = pending.resolve();
+            testBucket(coldBucket);
+            REQUIRE(fs::exists(coldBucket->getFilename()));
+        }
+
+        // TODO: once history support is implemented, test reattaching to a
+        // bucket that is not in memory; also test garbage collection
+    }
+}
+
+TEST_CASE_VERSIONS("hot archive bucket tombstones expire at bottom level",
+                   "[bucket][bucketlist][tombstones]")
+{
+    VirtualClock clock;
+    Config const& cfg = getTestConfig();
+
+    testutil::BucketListDepthModifier<HotArchiveBucket> bldm(5);
+    auto app = createTestApplication(clock, cfg);
+    for_versions_from(23, *app, [&] {
+        HotArchiveBucketList bl;
+
+        auto lastSnapSize = [&] {
+            auto& level = bl.getLevel(HotArchiveBucketList::kNumLevels - 2);
+            return countEntries(level.getSnap());
+        };
+
+        auto countNonBottomLevelEntries = [&] {
+            auto size = 0;
+            for (uint32_t i = 0; i < HotArchiveBucketList::kNumLevels - 1; ++i)
+            {
+                auto& level = bl.getLevel(i);
+                size += countEntries(level.getCurr());
+                size += countEntries(level.getSnap());
+            }
+            return size;
+        };
+
+        // Populate a BucketList so everything but the bottom level is full.
+        UnorderedSet<LedgerKey> keys;
+        auto numExpectedEntries = 0;
+        auto ledger = 1;
+        while (lastSnapSize() == 0)
+        {
+            bl.addBatch(*app, ledger, getAppLedgerVersion(app), {},
+                        LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes(
+                            {CONTRACT_CODE, CONTRACT_DATA}, 5, keys),
+                        LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes(
+                            {CONTRACT_CODE, CONTRACT_DATA}, 5, keys));
+
+            // Once all entries merge to the bottom level, only deleted entries
+            // should remain
+            numExpectedEntries += 5;
+
+            ++ledger;
+        }
+
+        // Close ledgers until all entries have merged into the bottom level
+        // bucket
+        while (countNonBottomLevelEntries() != 0)
+        {
+            bl.addBatch(*app, ledger, getAppLedgerVersion(app), {}, {}, {});
+            ++ledger;
+        }
+
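+        // Restored entries are dropped once they reach the bottom level, so
+        // only the HOT_ARCHIVE_DELETED tombstones should remain.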
+        auto bottomCurr =
+            bl.getLevel(HotArchiveBucketList::kNumLevels - 1).getCurr();
+        REQUIRE(countEntries(bottomCurr) == numExpectedEntries);
+
+        for (HotArchiveBucketInputIterator iter(bottomCurr); iter; ++iter)
+        {
+            auto be = *iter;
+            REQUIRE(be.type() == HOT_ARCHIVE_DELETED);
+            REQUIRE(keys.find(be.key()) != keys.end());
+        }
+    });
+}
+
+TEST_CASE_VERSIONS("live bucket tombstones expire at bottom level",
                    "[bucket][bucketlist][tombstones]")
 {
     VirtualClock clock;
@@ -345,50 +629,43 @@ TEST_CASE_VERSIONS("bucket tombstones expire at bottom level",
 
     for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) {
         Application::pointer app = createTestApplication(clock, cfg);
-        BucketList bl;
+        LiveBucketList bl;
         BucketManager& bm = app->getBucketManager();
         auto& mergeTimer = bm.getMergeTimer();
         CLOG_INFO(Bucket, "Establishing random bucketlist");
-        for (uint32_t i = 0; i < BucketList::kNumLevels; ++i)
+        for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i)
         {
             auto& level = bl.getLevel(i);
-            level.setCurr(Bucket::fresh(
+            level.setCurr(LiveBucket::fresh(
                 bm, getAppLedgerVersion(app), {},
-                LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions(
-                    {CONFIG_SETTING}, 8),
+                LedgerTestUtils::generateValidUniqueLedgerEntries(8),
                 LedgerTestUtils::generateValidLedgerEntryKeysWithExclusions(
                     {CONFIG_SETTING}, 5),
                 /*countMergeEvents=*/true, clock.getIOContext(),
                 /*doFsync=*/true));
-            level.setSnap(Bucket::fresh(
+            level.setSnap(LiveBucket::fresh(
                 bm, getAppLedgerVersion(app), {},
-                LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions(
-                    {CONFIG_SETTING}, 8),
+                LedgerTestUtils::generateValidUniqueLedgerEntries(8),
                 LedgerTestUtils::generateValidLedgerEntryKeysWithExclusions(
                     {CONFIG_SETTING}, 5),
                 /*countMergeEvents=*/true, clock.getIOContext(),
                 /*doFsync=*/true));
         }
 
-        for (uint32_t i = 0; i < BucketList::kNumLevels; ++i)
+        for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i)
         {
-            std::vector<uint32_t> ledgers = {BucketList::levelHalf(i),
-                                             BucketList::levelSize(i)};
+            std::vector<uint32_t> ledgers = {LiveBucketList::levelHalf(i),
+                                             LiveBucketList::levelSize(i)};
             for (auto j : ledgers)
             {
                 auto n = mergeTimer.count();
-                auto lh =
-                    app->getLedgerManager().getLastClosedLedgerHeader().header;
-                lh.ledgerSeq = j;
-                addBatchAndUpdateSnapshot(
-                    bl, *app, lh, {},
-                    LedgerTestUtils::
-                        generateValidUniqueLedgerEntriesWithExclusions(
-                            {CONFIG_SETTING}, 8),
+                bl.addBatch(
+                    *app, j, getAppLedgerVersion(app), {},
+                    LedgerTestUtils::generateValidUniqueLedgerEntries(8),
                     LedgerTestUtils::generateValidLedgerEntryKeysWithExclusions(
                         {CONFIG_SETTING}, 5));
                 app->getClock().crank(false);
-                for (uint32_t k = 0u; k < BucketList::kNumLevels; ++k)
+                for (uint32_t k = 0u; k < LiveBucketList::kNumLevels; ++k)
                 {
                     auto& next = bl.getLevel(k).getNext();
                     if (next.isLive())
@@ -401,13 +678,13 @@ TEST_CASE_VERSIONS("bucket tombstones expire at bottom level",
                           "Added batch at ledger {}, merges provoked: {}", j,
                           n);
                 REQUIRE(n > 0);
-                REQUIRE(n < 2 * BucketList::kNumLevels);
+                REQUIRE(n < 2 * LiveBucketList::kNumLevels);
             }
         }
 
-        EntryCounts e0(bl.getLevel(BucketList::kNumLevels - 3).getCurr());
-        EntryCounts e1(bl.getLevel(BucketList::kNumLevels - 2).getCurr());
-        EntryCounts e2(bl.getLevel(BucketList::kNumLevels - 1).getCurr());
+        EntryCounts e0(bl.getLevel(LiveBucketList::kNumLevels - 3).getCurr());
+        EntryCounts e1(bl.getLevel(LiveBucketList::kNumLevels - 2).getCurr());
+        EntryCounts e2(bl.getLevel(LiveBucketList::kNumLevels - 1).getCurr());
         REQUIRE(e0.nDead != 0);
         REQUIRE(e1.nDead != 0);
         REQUIRE(e2.nDead == 0);
@@ -422,7 +699,8 @@ TEST_CASE_VERSIONS("bucket tombstones mutually-annihilate init entries",
 
     for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) {
         Application::pointer app = createTestApplication(clock, cfg);
-        BucketList bl;
+        LiveBucketList bl;
+        auto vers = getAppLedgerVersion(app);
         autocheck::generator<bool> flip;
         std::deque<LedgerEntry> entriesToModify;
         for (uint32_t i = 1; i < 512; ++i)
@@ -458,13 +736,9 @@ TEST_CASE_VERSIONS("bucket tombstones mutually-annihilate init entries",
                     deadEntries.push_back(LedgerEntryKey(e));
                 }
             }
-            auto lh =
-                app->getLedgerManager().getLastClosedLedgerHeader().header;
-            lh.ledgerSeq = i;
-            addBatchAndUpdateSnapshot(bl, *app, lh, initEntries, liveEntries,
-                                      deadEntries);
+            bl.addBatch(*app, i, vers, initEntries, liveEntries, deadEntries);
             app->getClock().crank(false);
-            for (uint32_t k = 0u; k < BucketList::kNumLevels; ++k)
+            for (uint32_t k = 0u; k < LiveBucketList::kNumLevels; ++k)
             {
                 auto& next = bl.getLevel(k).getNext();
                 if (next.isLive())
@@ -473,14 +747,15 @@ TEST_CASE_VERSIONS("bucket tombstones mutually-annihilate init entries",
                 }
             }
         }
-        for (uint32_t k = 0u; k < BucketList::kNumLevels; ++k)
+        for (uint32_t k = 0u; k < LiveBucketList::kNumLevels; ++k)
         {
             auto const& lev = bl.getLevel(k);
             auto currSz = countEntries(lev.getCurr());
             auto snapSz = countEntries(lev.getSnap());
             if (protocolVersionStartsFrom(
                     cfg.LEDGER_PROTOCOL_VERSION,
-                    Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY))
+                    LiveBucket::
+                        FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY))
             {
                 // init/dead pairs should mutually-annihilate pretty readily as
                 // they go, empirically this test peaks at buckets around 400
@@ -501,35 +776,29 @@ TEST_CASE_VERSIONS("single entry bubbling up",
     {
         for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) {
             Application::pointer app = createTestApplication(clock, cfg);
-            BucketList bl;
+            LiveBucketList bl;
             std::vector<stellar::LedgerKey> emptySet;
             std::vector<stellar::LedgerEntry> emptySetEntry;
 
             CLOG_DEBUG(Bucket, "Adding single entry in lowest level");
-            addBatchAndUpdateSnapshot(
-                bl, *app,
-                app->getLedgerManager().getLastClosedLedgerHeader().header, {},
-                LedgerTestUtils::generateValidLedgerEntriesWithExclusions(
-                    {CONFIG_SETTING}, 1),
-                emptySet);
+            bl.addBatch(*app, 1, getAppLedgerVersion(app), {},
+                        LedgerTestUtils::generateValidLedgerEntries(1),
+                        emptySet);
 
             CLOG_DEBUG(Bucket, "Adding empty batches to bucket list");
             for (uint32_t i = 2;
                  !app->getClock().getIOContext().stopped() && i < 300; ++i)
             {
                 app->getClock().crank(false);
-                auto lh =
-                    app->getLedgerManager().getLastClosedLedgerHeader().header;
-                lh.ledgerSeq = i;
-                addBatchAndUpdateSnapshot(bl, *app, lh, {}, emptySetEntry,
-                                          emptySet);
+                bl.addBatch(*app, i, getAppLedgerVersion(app), {},
+                            emptySetEntry, emptySet);
                 if (i % 10 == 0)
                     CLOG_DEBUG(Bucket, "Added batch {}, hash={}", i,
                                binToHex(bl.getHash()));
 
                 CLOG_DEBUG(Bucket, "------- ledger {}", i);
 
-                for (uint32_t j = 0; j <= BucketList::kNumLevels - 1; ++j)
+                for (uint32_t j = 0; j <= LiveBucketList::kNumLevels - 1; ++j)
                 {
                     uint32_t lb = lowBoundExclusive(j, i);
                     uint32_t hb = highBoundInclusive(j, i);
@@ -561,31 +830,32 @@ TEST_CASE_VERSIONS("single entry bubbling up",
     }
 }
 
-TEST_CASE("BucketList sizeOf and oldestLedgerIn relations",
-          "[bucket][bucketlist][count]")
+template <class BucketListT>
+static void
+sizeOfTests()
 {
     stellar::uniform_int_distribution<uint32_t> dist;
     for (uint32_t i = 0; i < 1000; ++i)
     {
-        for (uint32_t level = 0; level < BucketList::kNumLevels; ++level)
+        for (uint32_t level = 0; level < BucketListT::kNumLevels; ++level)
         {
             uint32_t ledger = dist(gRandomEngine);
-            if (BucketList::sizeOfSnap(ledger, level) > 0)
+            if (BucketListT::sizeOfSnap(ledger, level) > 0)
             {
                 uint32_t oldestInCurr =
-                    BucketList::oldestLedgerInSnap(ledger, level) +
-                    BucketList::sizeOfSnap(ledger, level);
+                    BucketListT::oldestLedgerInSnap(ledger, level) +
+                    BucketListT::sizeOfSnap(ledger, level);
                 REQUIRE(oldestInCurr ==
-                        BucketList::oldestLedgerInCurr(ledger, level));
+                        BucketListT::oldestLedgerInCurr(ledger, level));
             }
-            if (BucketList::sizeOfCurr(ledger, level) > 0)
+            if (BucketListT::sizeOfCurr(ledger, level) > 0)
             {
                 uint32_t newestInCurr =
-                    BucketList::oldestLedgerInCurr(ledger, level) +
-                    BucketList::sizeOfCurr(ledger, level) - 1;
+                    BucketListT::oldestLedgerInCurr(ledger, level) +
+                    BucketListT::sizeOfCurr(ledger, level) - 1;
                 REQUIRE(newestInCurr == (level == 0
                                              ? ledger
-                                             : BucketList::oldestLedgerInSnap(
+                                             : BucketListT::oldestLedgerInSnap(
                                                    ledger, level - 1) -
                                                    1));
             }
@@ -593,13 +863,29 @@ TEST_CASE("BucketList sizeOf and oldestLedgerIn relations",
     }
 }
 
-TEST_CASE("BucketList snap reaches steady state", "[bucket][bucketlist][count]")
+TEST_CASE("BucketList sizeOf and oldestLedgerIn relations",
+          "[bucket][bucketlist][count]")
+{
+    SECTION("live bl")
+    {
+        sizeOfTests<LiveBucketList>();
+    }
+
+    SECTION("hot archive bl")
+    {
+        sizeOfTests<HotArchiveBucketList>();
+    }
+}
+
+template <class BucketListT>
+static void
+snapSteadyStateTest()
 {
     // Deliberately exclude deepest level since snap on the deepest level
     // is always empty.
-    for (uint32_t level = 0; level < BucketList::kNumLevels - 1; ++level)
+    for (uint32_t level = 0; level < BucketListT::kNumLevels - 1; ++level)
     {
-        uint32_t const half = BucketList::levelHalf(level);
+        uint32_t const half = BucketListT::levelHalf(level);
 
         // Use binary search (assuming that it does reach steady state)
         // to find the ledger where the snap at this level first reaches
@@ -607,7 +893,7 @@ TEST_CASE("BucketList snap reaches steady state", "[bucket][bucketlist][count]")
         uint32_t boundary = binarySearchForLedger(
             1, std::numeric_limits<uint32_t>::max() / 2,
             [level, half](uint32_t ledger) {
-                return (BucketList::sizeOfSnap(ledger, level) == half);
+                return (BucketListT::sizeOfSnap(ledger, level) == half);
             });
 
         // Generate random ledgers above and below the split to test that
@@ -618,21 +904,36 @@ TEST_CASE("BucketList snap reaches steady state", "[bucket][bucketlist][count]")
         {
             uint32_t low = distLow(gRandomEngine);
             uint32_t high = distHigh(gRandomEngine);
-            REQUIRE(BucketList::sizeOfSnap(low, level) < half);
-            REQUIRE(BucketList::sizeOfSnap(high, level) == half);
+            REQUIRE(BucketListT::sizeOfSnap(low, level) < half);
+            REQUIRE(BucketListT::sizeOfSnap(high, level) == half);
         }
     }
 }
 
-TEST_CASE("BucketList deepest curr accumulates", "[bucket][bucketlist][count]")
+TEST_CASE("BucketList snap reaches steady state", "[bucket][bucketlist][count]")
+{
+    SECTION("live bl")
+    {
+        snapSteadyStateTest<LiveBucketList>();
+    }
+
+    SECTION("hot archive bl")
+    {
+        snapSteadyStateTest<HotArchiveBucketList>();
+    }
+}
+
+template <class BucketListT>
+static void
+deepestCurrTest()
 {
-    uint32_t const deepest = BucketList::kNumLevels - 1;
+    uint32_t const deepest = BucketListT::kNumLevels - 1;
     // Use binary search to find the first ledger where the deepest curr
     // first is non-empty.
     uint32_t boundary = binarySearchForLedger(
         1, std::numeric_limits<uint32_t>::max() / 2,
         [deepest](uint32_t ledger) {
-            return (BucketList::sizeOfCurr(ledger, deepest) > 0);
+            return (BucketListT::sizeOfCurr(ledger, deepest) > 0);
         });
     stellar::uniform_int_distribution<uint32_t> distLow(1, boundary - 1);
     stellar::uniform_int_distribution<uint32_t> distHigh(boundary);
@@ -640,29 +941,57 @@ TEST_CASE("BucketList deepest curr accumulates", "[bucket][bucketlist][count]")
     {
         uint32_t low = distLow(gRandomEngine);
         uint32_t high = distHigh(gRandomEngine);
-        REQUIRE(BucketList::sizeOfCurr(low, deepest) == 0);
-        REQUIRE(BucketList::oldestLedgerInCurr(low, deepest) ==
+        REQUIRE(BucketListT::sizeOfCurr(low, deepest) == 0);
+        REQUIRE(BucketListT::oldestLedgerInCurr(low, deepest) ==
                 std::numeric_limits<uint32_t>::max());
-        REQUIRE(BucketList::sizeOfCurr(high, deepest) > 0);
-        REQUIRE(BucketList::oldestLedgerInCurr(high, deepest) == 1);
+        REQUIRE(BucketListT::sizeOfCurr(high, deepest) > 0);
+        REQUIRE(BucketListT::oldestLedgerInCurr(high, deepest) == 1);
 
-        REQUIRE(BucketList::sizeOfSnap(low, deepest) == 0);
-        REQUIRE(BucketList::oldestLedgerInSnap(low, deepest) ==
+        REQUIRE(BucketListT::sizeOfSnap(low, deepest) == 0);
+        REQUIRE(BucketListT::oldestLedgerInSnap(low, deepest) ==
                 std::numeric_limits<uint32_t>::max());
-        REQUIRE(BucketList::sizeOfSnap(high, deepest) == 0);
-        REQUIRE(BucketList::oldestLedgerInSnap(high, deepest) ==
+        REQUIRE(BucketListT::sizeOfSnap(high, deepest) == 0);
+        REQUIRE(BucketListT::oldestLedgerInSnap(high, deepest) ==
                 std::numeric_limits<uint32_t>::max());
     }
 }
 
+TEST_CASE("BucketList deepest curr accumulates", "[bucket][bucketlist][count]")
+{
+    SECTION("live bl")
+    {
+        deepestCurrTest<LiveBucketList>();
+    }
+
+    SECTION("hot archive bl")
+    {
+        deepestCurrTest<HotArchiveBucketList>();
+    }
+}
+
+template <class BucketListT>
+static void
+blSizesAtLedger1Test()
+{
+    REQUIRE(BucketListT::sizeOfCurr(1, 0) == 1);
+    REQUIRE(BucketListT::sizeOfSnap(1, 0) == 0);
+    for (uint32_t level = 1; level < BucketListT::kNumLevels; ++level)
+    {
+        REQUIRE(BucketListT::sizeOfCurr(1, level) == 0);
+        REQUIRE(BucketListT::sizeOfSnap(1, level) == 0);
+    }
+}
+
 TEST_CASE("BucketList sizes at ledger 1", "[bucket][bucketlist][count]")
 {
-    REQUIRE(BucketList::sizeOfCurr(1, 0) == 1);
-    REQUIRE(BucketList::sizeOfSnap(1, 0) == 0);
-    for (uint32_t level = 1; level < BucketList::kNumLevels; ++level)
+    SECTION("live bl")
     {
-        REQUIRE(BucketList::sizeOfCurr(1, level) == 0);
-        REQUIRE(BucketList::sizeOfSnap(1, level) == 0);
+        blSizesAtLedger1Test<LiveBucketList>();
+    }
+
+    SECTION("hot archive bl")
+    {
+        blSizesAtLedger1Test<HotArchiveBucketList>();
     }
 }
 
@@ -671,7 +1000,7 @@ TEST_CASE("BucketList check bucket sizes", "[bucket][bucketlist][count]")
     VirtualClock clock;
     Config cfg(getTestConfig());
     Application::pointer app = createTestApplication(clock, cfg);
-    BucketList& bl = app->getBucketManager().getBucketList();
+    LiveBucketList& bl = app->getBucketManager().getLiveBucketList();
     std::vector<LedgerKey> emptySet;
     auto ledgers =
         LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions(
@@ -685,10 +1014,10 @@ TEST_CASE("BucketList check bucket sizes", "[bucket][bucketlist][count]")
             auto lh =
                 app->getLedgerManager().getLastClosedLedgerHeader().header;
             lh.ledgerSeq = ledgerSeq;
-            addBatchAndUpdateSnapshot(bl, *app, lh, {},
-                                      {ledgers[ledgerSeq - 1]}, emptySet);
+            addLiveBatchAndUpdateSnapshot(*app, lh, {},
+                                          {ledgers[ledgerSeq - 1]}, emptySet);
         }
-        for (uint32_t level = 0; level < BucketList::kNumLevels; ++level)
+        for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level)
         {
             checkBucketSizeAndBounds(bl, ledgerSeq, level, true);
             checkBucketSizeAndBounds(bl, ledgerSeq, level, false);
@@ -699,7 +1028,7 @@ TEST_CASE("BucketList check bucket sizes", "[bucket][bucketlist][count]")
 TEST_CASE_VERSIONS("network config snapshots BucketList size", "[bucketlist]")
 {
     VirtualClock clock;
-    Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+    Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY));
     cfg.USE_CONFIG_FOR_GENESIS = true;
 
     auto app = createTestApplication<BucketTestApplication>(clock, cfg);
@@ -767,7 +1096,7 @@ TEST_CASE_VERSIONS("network config snapshots BucketList size", "[bucketlist]")
             {
                 correctWindow.pop_front();
                 correctWindow.push_back(
-                    app->getBucketManager().getBucketList().getSize());
+                    app->getBucketManager().getLiveBucketList().getSize());
             }
 
             lm.setNextLedgerEntryBatchForBucketTesting(
@@ -792,341 +1121,367 @@ TEST_CASE_VERSIONS("eviction scan", "[bucketlist]")
     Config cfg(getTestConfig());
     cfg.USE_CONFIG_FOR_GENESIS = true;
 
-    auto test = [&](bool backgroundScan) {
-        // BucketTestApplication writes directly to BL and circumvents LedgerTxn
-        // interface, so we have to use BucketListDB for lookups
-        cfg.DEPRECATED_SQL_LEDGER_STATE = false;
-        cfg.BACKGROUND_EVICTION_SCAN = backgroundScan;
-
-        auto app = createTestApplication<BucketTestApplication>(clock, cfg);
-        for_versions_from(20, *app, [&] {
-            LedgerManagerForBucketTests& lm = app->getLedgerManager();
-            auto& bm = app->getBucketManager();
-            auto& bl = bm.getBucketList();
-
-            auto& networkCfg = [&]() -> SorobanNetworkConfig& {
-                LedgerTxn ltx(app->getLedgerTxnRoot());
-                return app->getLedgerManager().getMutableSorobanNetworkConfig();
-            }();
-
-            auto& stateArchivalSettings = networkCfg.stateArchivalSettings();
-            auto& evictionIter = networkCfg.evictionIterator();
-            auto const levelToScan = 3;
-            uint32_t ledgerSeq = 1;
-
-            stateArchivalSettings.minTemporaryTTL = 1;
-            stateArchivalSettings.minPersistentTTL = 1;
-
-            // Because this test uses BucketTestApplication, we must manually
-            // add the Network Config LedgerEntries to the BucketList with
-            // setNextLedgerEntryBatchForBucketTesting whenever state archival
-            // settings or the eviction iterator is manually changed
-            auto getNetworkCfgLE = [&] {
-                std::vector<LedgerEntry> result;
-                LedgerEntry sesLE;
-                sesLE.data.type(CONFIG_SETTING);
-                sesLE.data.configSetting().configSettingID(
-                    ConfigSettingID::CONFIG_SETTING_STATE_ARCHIVAL);
-                sesLE.data.configSetting().stateArchivalSettings() =
-                    stateArchivalSettings;
-                result.emplace_back(sesLE);
-
-                LedgerEntry iterLE;
-                iterLE.data.type(CONFIG_SETTING);
-                iterLE.data.configSetting().configSettingID(
-                    ConfigSettingID::CONFIG_SETTING_EVICTION_ITERATOR);
-                iterLE.data.configSetting().evictionIterator() = evictionIter;
-                result.emplace_back(iterLE);
-
-                return result;
-            };
+    auto app = createTestApplication<BucketTestApplication>(clock, cfg);
+    for_versions_from(20, *app, [&] {
+        LedgerManagerForBucketTests& lm = app->getLedgerManager();
+        auto& bm = app->getBucketManager();
+        auto& bl = bm.getLiveBucketList();
+
+        auto& networkCfg = [&]() -> SorobanNetworkConfig& {
+            LedgerTxn ltx(app->getLedgerTxnRoot());
+            return app->getLedgerManager().getMutableSorobanNetworkConfig();
+        }();
+
+        auto& stateArchivalSettings = networkCfg.stateArchivalSettings();
+        auto& evictionIter = networkCfg.evictionIterator();
+        auto const levelToScan = 3;
+        uint32_t ledgerSeq = 1;
+
+        stateArchivalSettings.minTemporaryTTL = 1;
+        stateArchivalSettings.minPersistentTTL = 1;
+
+        // Because this test uses BucketTestApplication, we must manually
+        // add the Network Config LedgerEntries to the BucketList with
+        // setNextLedgerEntryBatchForBucketTesting whenever state archival
+        // settings or the eviction iterator is manually changed
+        auto getNetworkCfgLE = [&] {
+            std::vector<LedgerEntry> result;
+            LedgerEntry sesLE;
+            sesLE.data.type(CONFIG_SETTING);
+            sesLE.data.configSetting().configSettingID(
+                ConfigSettingID::CONFIG_SETTING_STATE_ARCHIVAL);
+            sesLE.data.configSetting().stateArchivalSettings() =
+                stateArchivalSettings;
+            result.emplace_back(sesLE);
+
+            LedgerEntry iterLE;
+            iterLE.data.type(CONFIG_SETTING);
+            iterLE.data.configSetting().configSettingID(
+                ConfigSettingID::CONFIG_SETTING_EVICTION_ITERATOR);
+            iterLE.data.configSetting().evictionIterator() = evictionIter;
+            result.emplace_back(iterLE);
+
+            return result;
+        };
 
-            auto updateNetworkCfg = [&] {
-                lm.setNextLedgerEntryBatchForBucketTesting(
-                    {}, getNetworkCfgLE(), {});
-                closeLedger(*app);
-                ++ledgerSeq;
-            };
+        auto updateNetworkCfg = [&] {
+            lm.setNextLedgerEntryBatchForBucketTesting({}, getNetworkCfgLE(),
+                                                       {});
+            closeLedger(*app);
+            ++ledgerSeq;
+        };
 
-            auto checkIfEntryExists = [&](std::set<LedgerKey> const& keys,
-                                          bool shouldExist) {
-                LedgerTxn ltx(app->getLedgerTxnRoot());
-                for (auto const& key : keys)
-                {
-                    auto txle = ltx.loadWithoutRecord(key);
-                    REQUIRE(static_cast<bool>(txle) == shouldExist);
+        auto checkIfEntryExists = [&](std::set<LedgerKey> const& keys,
+                                      bool shouldExist) {
+            LedgerTxn ltx(app->getLedgerTxnRoot());
+            for (auto const& key : keys)
+            {
+                auto txle = ltx.loadWithoutRecord(key);
+                REQUIRE(static_cast<bool>(txle) == shouldExist);
 
-                    auto TTLTxle = ltx.loadWithoutRecord(getTTLKey(key));
-                    REQUIRE(static_cast<bool>(TTLTxle) == shouldExist);
-                }
-            };
+                auto TTLTxle = ltx.loadWithoutRecord(getTTLKey(key));
+                REQUIRE(static_cast<bool>(TTLTxle) == shouldExist);
+            }
+        };
 
-            std::set<LedgerKey> tempEntries;
-            std::set<LedgerKey> persistentEntries;
-            std::vector<LedgerEntry> entries;
-            for (auto& e :
-                 LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes(
-                     {CONTRACT_DATA}, 50))
+        std::set<LedgerKey> tempEntries;
+        std::set<LedgerKey> persistentEntries;
+        std::vector<LedgerEntry> entries;
+        for (auto& e :
+             LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes(
+                 {CONTRACT_DATA}, 50))
+        {
+            // Set half of the entries to be persistent, half temporary
+            if (tempEntries.empty() || rand_flip())
             {
-                // Set half of the entries to be persistent, half temporary
-                if (tempEntries.empty() || rand_flip())
-                {
-                    e.data.contractData().durability = TEMPORARY;
-                    tempEntries.emplace(LedgerEntryKey(e));
-                }
-                else
-                {
-                    e.data.contractData().durability = PERSISTENT;
-                    persistentEntries.emplace(LedgerEntryKey(e));
-                }
+                e.data.contractData().durability = TEMPORARY;
+                tempEntries.emplace(LedgerEntryKey(e));
+            }
+            else
+            {
+                e.data.contractData().durability = PERSISTENT;
+                persistentEntries.emplace(LedgerEntryKey(e));
+            }
 
-                LedgerEntry TTLEntry;
-                TTLEntry.data.type(TTL);
-                TTLEntry.data.ttl().keyHash = getTTLKey(e).ttl().keyHash;
-                TTLEntry.data.ttl().liveUntilLedgerSeq = ledgerSeq + 1;
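+            // Each TTL expires one ledger out, so entries become eligible
+            // for eviction almost immediately.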
+            LedgerEntry TTLEntry;
+            TTLEntry.data.type(TTL);
+            TTLEntry.data.ttl().keyHash = getTTLKey(e).ttl().keyHash;
+            TTLEntry.data.ttl().liveUntilLedgerSeq = ledgerSeq + 1;
 
-                entries.emplace_back(e);
-                entries.emplace_back(TTLEntry);
-            }
+            entries.emplace_back(e);
+            entries.emplace_back(TTLEntry);
+        }
+
+        lm.setNextLedgerEntryBatchForBucketTesting(entries, getNetworkCfgLE(),
+                                                   {});
+        closeLedger(*app);
+        ++ledgerSeq;
 
-            lm.setNextLedgerEntryBatchForBucketTesting(entries,
-                                                       getNetworkCfgLE(), {});
+        // Iterate until entries reach the level where eviction will start
+        for (; bl.getLevel(levelToScan).getCurr()->isEmpty(); ++ledgerSeq)
+        {
+            checkIfEntryExists(tempEntries, true);
+            checkIfEntryExists(persistentEntries, true);
+            lm.setNextLedgerEntryBatchForBucketTesting({}, {}, {});
             closeLedger(*app);
-            ++ledgerSeq;
+        }
 
-            // Iterate until entries reach the level where eviction will start
-            for (; bl.getLevel(levelToScan).getCurr()->isEmpty(); ++ledgerSeq)
+        SECTION("basic eviction test")
+        {
+            // Set eviction to start at level where the entries
+            // currently are
+            stateArchivalSettings.startingEvictionScanLevel = levelToScan;
+            updateNetworkCfg();
+
+            // All entries should be evicted at once
+            closeLedger(*app);
+            ++ledgerSeq;
+            checkIfEntryExists(tempEntries, false);
+            checkIfEntryExists(persistentEntries, true);
+
+            auto& entriesEvictedCounter = bm.getEntriesEvictedCounter();
+            REQUIRE(entriesEvictedCounter.count() == tempEntries.size());
+
+            // Close ledgers until evicted DEADENTRYs merge with
+            // original INITENTRYs. This checks that BucketList
+            // invariants are respected
+            for (auto initialDeadMerges =
+                     bm.readMergeCounters().mOldInitEntriesMergedWithNewDead;
+                 bm.readMergeCounters().mOldInitEntriesMergedWithNewDead <
+                 initialDeadMerges + tempEntries.size();
+                 ++ledgerSeq)
             {
-                checkIfEntryExists(tempEntries, true);
-                checkIfEntryExists(persistentEntries, true);
-                lm.setNextLedgerEntryBatchForBucketTesting({}, {}, {});
                 closeLedger(*app);
             }
 
-            SECTION("basic eviction test")
-            {
-                // Set eviction to start at level where the entries
-                // currently are
-                stateArchivalSettings.startingEvictionScanLevel = levelToScan;
-                updateNetworkCfg();
+            REQUIRE(entriesEvictedCounter.count() == tempEntries.size());
+        }
 
-                // All entries should be evicted at once
-                closeLedger(*app);
-                ++ledgerSeq;
-                checkIfEntryExists(tempEntries, false);
-                checkIfEntryExists(persistentEntries, true);
-
-                auto& entriesEvictedCounter = bm.getEntriesEvictedCounter();
-                REQUIRE(entriesEvictedCounter.count() == tempEntries.size());
-
-                // Close ledgers until evicted DEADENTRYs merge with
-                // original INITENTRYs. This checks that BucketList
-                // invariants are respected
-                for (auto initialDeadMerges =
-                         bm.readMergeCounters()
-                             .mOldInitEntriesMergedWithNewDead;
-                     bm.readMergeCounters().mOldInitEntriesMergedWithNewDead <
-                     initialDeadMerges + tempEntries.size();
-                     ++ledgerSeq)
+        SECTION("shadowed entries not evicted")
+        {
+            // Set eviction to start at level where the entries
+            // currently are
+            stateArchivalSettings.startingEvictionScanLevel = levelToScan;
+            updateNetworkCfg();
+
+            // Shadow non-live entries with updated, live versions
+            for (auto& e : entries)
+            {
+                // Only need to update TTLEntries
+                if (e.data.type() == TTL)
                 {
-                    closeLedger(*app);
+                    e.data.ttl().liveUntilLedgerSeq = ledgerSeq + 10;
                 }
-
-                REQUIRE(entriesEvictedCounter.count() == tempEntries.size());
             }
+            lm.setNextLedgerEntryBatchForBucketTesting({}, entries, {});
 
-            SECTION("shadowed entries not evicted")
-            {
-                // Set eviction to start at level where the entries
-                // currently are
-                stateArchivalSettings.startingEvictionScanLevel = levelToScan;
-                updateNetworkCfg();
+            // Close two ledgers to give eviction scan opportunity to
+            // process new entries
+            closeLedger(*app);
+            closeLedger(*app);
 
-                // Shadow non-live entries with updated, live versions
-                for (auto& e : entries)
-                {
-                    // Only need to update TTLEntries
-                    if (e.data.type() == TTL)
-                    {
-                        e.data.ttl().liveUntilLedgerSeq = ledgerSeq + 10;
-                    }
-                }
-                lm.setNextLedgerEntryBatchForBucketTesting({}, entries, {});
+            // Entries are shadowed, should not be evicted
+            checkIfEntryExists(tempEntries, true);
+            checkIfEntryExists(persistentEntries, true);
+        }
 
-                // Close two ledgers to give eviction scan opportunity to
-                // process new entries
-                closeLedger(*app);
+        SECTION("maxEntriesToArchive")
+        {
+            // Check that we only evict one entry at a time
+            stateArchivalSettings.maxEntriesToArchive = 1;
+            stateArchivalSettings.startingEvictionScanLevel = levelToScan;
+            updateNetworkCfg();
+
+            auto& entriesEvictedCounter = bm.getEntriesEvictedCounter();
+            auto prevIter = evictionIter;
+            for (auto prevCount = entriesEvictedCounter.count();
+                 prevCount < tempEntries.size();)
+            {
                 closeLedger(*app);
 
-                // Entries are shadowed, should not be evicted
-                checkIfEntryExists(tempEntries, true);
-                checkIfEntryExists(persistentEntries, true);
+                // Make sure we evict all entries without circling back
+                // through the BucketList
+                auto didAdvance =
+                    prevIter.bucketFileOffset < evictionIter.bucketFileOffset ||
+                    prevIter.bucketListLevel < evictionIter.bucketListLevel ||
+                    // assert isCurrBucket goes from true -> false
+                    // true > false == 1 > 0
+                    prevIter.isCurrBucket > evictionIter.isCurrBucket;
+                REQUIRE(didAdvance);
+
+                // Check that we only evict at most maxEntriesToArchive
+                // per ledger
+                auto newCount = entriesEvictedCounter.count();
+                REQUIRE((newCount == prevCount || newCount == prevCount + 1));
+                prevCount = newCount;
             }
 
-            SECTION("maxEntriesToArchive")
-            {
-                // Check that we only evict one entry at a time
-                stateArchivalSettings.maxEntriesToArchive = 1;
-                stateArchivalSettings.startingEvictionScanLevel = levelToScan;
-                updateNetworkCfg();
+            // All entries should have been evicted
+            checkIfEntryExists(tempEntries, false);
+            checkIfEntryExists(persistentEntries, true);
+        }
 
-                auto& entriesEvictedCounter = bm.getEntriesEvictedCounter();
-                auto prevIter = evictionIter;
-                for (auto prevCount = entriesEvictedCounter.count();
-                     prevCount < tempEntries.size();)
-                {
-                    closeLedger(*app);
+        SECTION("maxEntriesToArchive with entry modified on eviction ledger")
+        {
 
-                    // Make sure we evict all entries without circling back
-                    // through the BucketList
-                    auto didAdvance =
-                        prevIter.bucketFileOffset <
-                            evictionIter.bucketFileOffset ||
-                        prevIter.bucketListLevel <
-                            evictionIter.bucketListLevel ||
-                        // assert isCurrBucket goes from true -> false
-                        // true > false == 1 > 0
-                        prevIter.isCurrBucket > evictionIter.isCurrBucket;
-                    REQUIRE(didAdvance);
-
-                    // Check that we only evict at most maxEntriesToArchive
-                    // per ledger
-                    auto newCount = entriesEvictedCounter.count();
-                    REQUIRE(
-                        (newCount == prevCount || newCount == prevCount + 1));
-                    prevCount = newCount;
-                }
+            // This test covers an edge case in background eviction:
+            // if entry n would be the last entry evicted due to
+            // maxEntriesToArchive, but that entry is updated on the
+            // eviction ledger, background eviction should still evict
+            // entry n + 1 instead.
+            stateArchivalSettings.maxEntriesToArchive = 1;
+            stateArchivalSettings.startingEvictionScanLevel = levelToScan;
+            updateNetworkCfg();
 
-                // All entries should have been evicted
-                checkIfEntryExists(tempEntries, false);
-                checkIfEntryExists(persistentEntries, true);
-            }
+            // First temp entry in Bucket will be updated with live TTL
+            std::optional<LedgerKey> entryToUpdate{};
+
+            // Second temp entry in bucket should be evicted
+            LedgerKey entryToEvict;
+            std::optional<uint64_t> expectedEndIterPosition{};
 
-            SECTION(
-                "maxEntriesToArchive with entry modified on eviction ledger")
+            for (LiveBucketInputIterator in(bl.getLevel(levelToScan).getCurr());
+                 in; ++in)
             {
-                if (backgroundScan)
+                // Temp entries should be sorted before persistent
+                // entries in the Bucket
+                auto be = *in;
+                if (be.type() == INITENTRY || be.type() == LIVEENTRY)
                 {
-                    // This test is for an edge case in background eviction.
-                    // We want to test that if entry n should be the last entry
-                    // evicted due to maxEntriesToArchive, but that entry is
-                    // updated on the eviction ledger, background eviction
-                    // should still evict entry n + 1
-                    stateArchivalSettings.maxEntriesToArchive = 1;
-                    stateArchivalSettings.startingEvictionScanLevel =
-                        levelToScan;
-                    updateNetworkCfg();
-
-                    // First temp entry in Bucket will be updated with live TTL
-                    std::optional<LedgerKey> entryToUpdate{};
-
-                    // Second temp entry in bucket should be evicted
-                    LedgerKey entryToEvict;
-                    std::optional<uint64_t> expectedEndIterPosition{};
-
-                    for (BucketInputIterator in(
-                             bl.getLevel(levelToScan).getCurr());
-                         in; ++in)
+                    auto le = be.liveEntry();
+                    if (le.data.type() == CONTRACT_DATA &&
+                        le.data.contractData().durability == TEMPORARY)
                     {
-                        // Temp entries should be sorted before persistent in
-                        // the Bucket
-                        auto be = *in;
-                        if (be.type() == INITENTRY || be.type() == LIVEENTRY)
+                        if (!entryToUpdate)
                         {
-                            auto le = be.liveEntry();
-                            if (le.data.type() == CONTRACT_DATA &&
-                                le.data.contractData().durability == TEMPORARY)
-                            {
-                                if (!entryToUpdate)
-                                {
-                                    entryToUpdate = LedgerEntryKey(le);
-                                }
-                                else
-                                {
-                                    entryToEvict = LedgerEntryKey(le);
-                                    expectedEndIterPosition = in.pos();
-                                    break;
-                                }
-                            }
+                            entryToUpdate = LedgerEntryKey(le);
+                        }
+                        else
+                        {
+                            entryToEvict = LedgerEntryKey(le);
+                            expectedEndIterPosition = in.pos();
+                            break;
                         }
                     }
+                }
+            }
 
-                    REQUIRE(expectedEndIterPosition.has_value());
+            REQUIRE(expectedEndIterPosition.has_value());
 
-                    // Update first evictable entry with new TTL
-                    auto ttlKey = getTTLKey(*entryToUpdate);
-                    LedgerEntry ttlLe;
-                    ttlLe.data.type(TTL);
-                    ttlLe.data.ttl().keyHash = ttlKey.ttl().keyHash;
-                    ttlLe.data.ttl().liveUntilLedgerSeq = ledgerSeq + 1;
+            // Update first evictable entry with new TTL
+            auto ttlKey = getTTLKey(*entryToUpdate);
+            LedgerEntry ttlLe;
+            ttlLe.data.type(TTL);
+            ttlLe.data.ttl().keyHash = ttlKey.ttl().keyHash;
+            ttlLe.data.ttl().liveUntilLedgerSeq = ledgerSeq + 1;
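+            // Bumping this TTL makes entryToUpdate live again on the
+            // eviction ledger, so the scan should skip it and evict
+            // entryToEvict instead.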
 
-                    lm.setNextLedgerEntryBatchForBucketTesting({}, {ttlLe}, {});
-                    closeLedger(*app);
+            lm.setNextLedgerEntryBatchForBucketTesting({}, {ttlLe}, {});
+            closeLedger(*app);
 
-                    LedgerTxn ltx(app->getLedgerTxnRoot());
-                    auto firstEntry = ltx.loadWithoutRecord(*entryToUpdate);
-                    REQUIRE(static_cast<bool>(firstEntry));
+            LedgerTxn ltx(app->getLedgerTxnRoot());
+            auto firstEntry = ltx.loadWithoutRecord(*entryToUpdate);
+            REQUIRE(static_cast<bool>(firstEntry));
 
-                    auto evictedEntry = ltx.loadWithoutRecord(entryToEvict);
-                    REQUIRE(!static_cast<bool>(evictedEntry));
+            auto evictedEntry = ltx.loadWithoutRecord(entryToEvict);
+            REQUIRE(!static_cast<bool>(evictedEntry));
 
-                    REQUIRE(evictionIter.bucketFileOffset ==
-                            *expectedEndIterPosition);
-                    REQUIRE(evictionIter.bucketListLevel == levelToScan);
-                    REQUIRE(evictionIter.isCurrBucket == true);
-                }
-            }
+            REQUIRE(evictionIter.bucketFileOffset == *expectedEndIterPosition);
+            REQUIRE(evictionIter.bucketListLevel == levelToScan);
+            REQUIRE(evictionIter.isCurrBucket == true);
+        }
 
-            auto constexpr xdrOverheadBytes = 4;
+        auto constexpr xdrOverheadBytes = 4;
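+        // The 4 bytes of overhead per entry account for the XDR
+        // record-size header that precedes each BucketEntry on disk.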
 
-            BucketInputIterator metaIn(bl.getLevel(0).getCurr());
-            BucketEntry be(METAENTRY);
-            be.metaEntry() = metaIn.getMetadata();
-            auto const metadataSize = xdr::xdr_size(be) + xdrOverheadBytes;
+        LiveBucketInputIterator metaIn(bl.getLevel(0).getCurr());
+        BucketEntry be(METAENTRY);
+        be.metaEntry() = metaIn.getMetadata();
+        auto const metadataSize = xdr::xdr_size(be) + xdrOverheadBytes;
 
-            SECTION("evictionScanSize")
-            {
-                // Set smallest possible scan size so eviction iterator
-                // scans one entry per scan
-                stateArchivalSettings.evictionScanSize = 1;
-                stateArchivalSettings.startingEvictionScanLevel = levelToScan;
-                updateNetworkCfg();
+        SECTION("evictionScanSize")
+        {
+            // Set the smallest possible scan size so the eviction
+            // iterator scans one entry per scan
+            stateArchivalSettings.evictionScanSize = 1;
+            stateArchivalSettings.startingEvictionScanLevel = levelToScan;
+            updateNetworkCfg();
+
+            // First eviction scan will only read meta
+            closeLedger(*app);
+            ++ledgerSeq;
+
+            REQUIRE(evictionIter.bucketFileOffset == metadataSize);
+            REQUIRE(evictionIter.bucketListLevel == levelToScan);
+            REQUIRE(evictionIter.isCurrBucket == true);
 
-                // First eviction scan will only read meta
+            size_t prevOff = evictionIter.bucketFileOffset;
+            // Check that each scan only reads one entry
+            for (LiveBucketInputIterator in(bl.getLevel(levelToScan).getCurr());
+                 in; ++in)
+            {
+                auto startingOffset = evictionIter.bucketFileOffset;
                 closeLedger(*app);
                 ++ledgerSeq;
 
-                REQUIRE(evictionIter.bucketFileOffset == metadataSize);
-                REQUIRE(evictionIter.bucketListLevel == levelToScan);
-                REQUIRE(evictionIter.isCurrBucket == true);
-
-                size_t prevOff = evictionIter.bucketFileOffset;
-                // Check that each scan only reads one entry
-                for (BucketInputIterator in(bl.getLevel(levelToScan).getCurr());
-                     in; ++in)
+                // If the BL receives an incoming merge, the scan will
+                // reset; break at that point.
+                if (evictionIter.bucketFileOffset < prevOff)
                 {
-                    auto startingOffset = evictionIter.bucketFileOffset;
-                    closeLedger(*app);
-                    ++ledgerSeq;
-
-                    // If the BL receives an incoming merge, the scan will
-                    // reset; break at that point.
-                    if (evictionIter.bucketFileOffset < prevOff)
-                    {
-                        break;
-                    }
-                    prevOff = evictionIter.bucketFileOffset;
-                    REQUIRE(evictionIter.bucketFileOffset ==
-                            xdr::xdr_size(*in) + startingOffset +
-                                xdrOverheadBytes);
-                    REQUIRE(evictionIter.bucketListLevel == levelToScan);
-                    REQUIRE(evictionIter.isCurrBucket == true);
+                    break;
                 }
+                prevOff = evictionIter.bucketFileOffset;
+                REQUIRE(evictionIter.bucketFileOffset ==
+                        xdr::xdr_size(*in) + startingOffset + xdrOverheadBytes);
+                REQUIRE(evictionIter.bucketListLevel == levelToScan);
+                REQUIRE(evictionIter.isCurrBucket == true);
             }
+        }
 
-            SECTION("scans across multiple buckets")
+        SECTION("scans across multiple buckets")
+        {
+            for (; bl.getLevel(2).getSnap()->getSize() < 1'000; ++ledgerSeq)
             {
-                for (; bl.getLevel(2).getSnap()->getSize() < 1'000; ++ledgerSeq)
+                lm.setNextLedgerEntryBatchForBucketTesting(
+                    {},
+                    LedgerTestUtils::generateValidLedgerEntriesWithExclusions(
+                        {CONFIG_SETTING, CONTRACT_DATA, CONTRACT_CODE}, 10),
+                    {});
+                closeLedger(*app);
+            }
+
+            // Reset iterator to the level 2 curr bucket we just populated
+            stateArchivalSettings.startingEvictionScanLevel = 2;
+
+            // Scan size should cover all of curr bucket plus one entry
+            // in snap per scan
+            stateArchivalSettings.evictionScanSize =
+                bl.getLevel(2).getCurr()->getSize() + 1;
+
+            // Reset iterator
+            evictionIter.bucketFileOffset = 0;
+            evictionIter.bucketListLevel = 2;
+            evictionIter.isCurrBucket = true;
+            updateNetworkCfg();
+
+            closeLedger(*app);
+            ++ledgerSeq;
+
+            // Iter should have advanced to snap and read first entry only
+            REQUIRE(evictionIter.bucketFileOffset == metadataSize);
+            REQUIRE(evictionIter.bucketListLevel == 2);
+            REQUIRE(evictionIter.isCurrBucket == false);
+        }
+
+        SECTION("iterator resets when bucket changes")
+        {
+            auto testIterReset = [&](bool isCurr) {
+                auto const levelToTest = 1;
+                auto bucket = [&]() {
+                    return isCurr ? bl.getLevel(levelToTest).getCurr()
+                                  : bl.getLevel(levelToTest).getSnap();
+                };
+
+                // Iterate until entries spill into level 1 bucket
+                for (; bucket()->getSize() < 1'000; ++ledgerSeq)
                 {
                     lm.setNextLedgerEntryBatchForBucketTesting(
                         {},
@@ -1138,134 +1493,76 @@ TEST_CASE_VERSIONS("eviction scan", "[bucketlist]")
                     closeLedger(*app);
                 }
 
-                // Reset iterator to level 2 curr bucket that we just populated
-                stateArchivalSettings.startingEvictionScanLevel = 2;
+                // Scan meta entry + one other entry in initial scan
+                stateArchivalSettings.evictionScanSize = metadataSize + 1;
 
-                // Scan size should scan all of curr bucket and one entry in
-                // snap per scan
-                stateArchivalSettings.evictionScanSize =
-                    bl.getLevel(2).getCurr()->getSize() + 1;
-
-                // Reset iterator
+                // Reset eviction iter to the start of the bucket being tested
+                stateArchivalSettings.startingEvictionScanLevel = levelToTest;
                 evictionIter.bucketFileOffset = 0;
-                evictionIter.bucketListLevel = 2;
-                evictionIter.isCurrBucket = true;
+                evictionIter.isCurrBucket = isCurr;
+                evictionIter.bucketListLevel = 1;
                 updateNetworkCfg();
 
-                closeLedger(*app);
-                ++ledgerSeq;
-
-                // Iter should have advanced to snap and read first entry only
-                REQUIRE(evictionIter.bucketFileOffset == metadataSize);
-                REQUIRE(evictionIter.bucketListLevel == 2);
-                REQUIRE(evictionIter.isCurrBucket == false);
-            }
-
-            SECTION("iterator resets when bucket changes")
-            {
-                auto testIterReset = [&](bool isCurr) {
-                    auto const levelToTest = 1;
-                    auto bucket = [&]() {
-                        return isCurr ? bl.getLevel(levelToTest).getCurr()
-                                      : bl.getLevel(levelToTest).getSnap();
-                    };
-
-                    // Iterate until entries spill into level 1 bucket
-                    for (; bucket()->getSize() < 1'000; ++ledgerSeq)
-                    {
-                        lm.setNextLedgerEntryBatchForBucketTesting(
-                            {},
-                            LedgerTestUtils::
-                                generateValidLedgerEntriesWithExclusions(
-                                    {CONFIG_SETTING, CONTRACT_DATA,
-                                     CONTRACT_CODE},
-                                    10),
-                            {});
-                        closeLedger(*app);
-                    }
-
-                    // Scan meta entry + one other entry in initial scan
-                    stateArchivalSettings.evictionScanSize = metadataSize + 1;
-
-                    // Reset eviction iter start of bucket being tested
-                    stateArchivalSettings.startingEvictionScanLevel =
-                        levelToTest;
-                    evictionIter.bucketFileOffset = 0;
-                    evictionIter.isCurrBucket = isCurr;
-                    evictionIter.bucketListLevel = 1;
-                    updateNetworkCfg();
-
-                    // Advance until one ledger before bucket is updated
-                    auto ledgersUntilUpdate =
-                        BucketList::bucketUpdatePeriod(levelToTest, isCurr) -
-                        1; // updateNetworkCfg closes a ledger that we need to
-                           // count
-                    for (uint32_t i = 0; i < ledgersUntilUpdate - 1; ++i)
-                    {
-                        auto startingIter = evictionIter;
-                        closeLedger(*app);
-                        ++ledgerSeq;
-
-                        // Check that iterator is making progress correctly
-                        REQUIRE(evictionIter.bucketFileOffset >
-                                startingIter.bucketFileOffset);
-                        REQUIRE(evictionIter.bucketListLevel == levelToTest);
-                        REQUIRE(evictionIter.isCurrBucket == isCurr);
-                    }
-
-                    // Next ledger close should update bucket
-                    auto startingHash = bucket()->getHash();
+                // Advance until one ledger before bucket is updated
+                auto ledgersUntilUpdate =
+                    LiveBucketList::bucketUpdatePeriod(levelToTest,
+                                                       isCurr) -
+                    1; // updateNetworkCfg closes a ledger that we need to
+                       // count
+                for (uint32_t i = 0; i < ledgersUntilUpdate - 1; ++i)
+                {
+                    auto startingIter = evictionIter;
                     closeLedger(*app);
                     ++ledgerSeq;
 
-                    // Check that bucket actually changed
-                    REQUIRE(bucket()->getHash() != startingHash);
+                    // Check that iterator is making progress correctly
+                    REQUIRE(evictionIter.bucketFileOffset >
+                            startingIter.bucketFileOffset);
+                    REQUIRE(evictionIter.bucketListLevel == levelToTest);
+                    REQUIRE(evictionIter.isCurrBucket == isCurr);
+                }
 
-                    // The iterator retroactively checks if the Bucket has
-                    // changed, so close one additional ledger to check if the
-                    // iterator has reset
-                    closeLedger(*app);
-                    ++ledgerSeq;
+                // Next ledger close should update bucket
+                auto startingHash = bucket()->getHash();
+                closeLedger(*app);
+                ++ledgerSeq;
 
-                    BucketInputIterator in(bucket());
+                // Check that bucket actually changed
+                REQUIRE(bucket()->getHash() != startingHash);
 
-                    // Check that iterator has reset to beginning of bucket and
-                    // read meta entry + one additional entry
-                    REQUIRE(evictionIter.bucketFileOffset ==
-                            metadataSize + xdr::xdr_size(*in) +
-                                xdrOverheadBytes);
-                    REQUIRE(evictionIter.bucketListLevel == levelToTest);
-                    REQUIRE(evictionIter.isCurrBucket == isCurr);
-                };
+                // The iterator retroactively checks if the Bucket has
+                // changed, so close one additional ledger to check if the
+                // iterator has reset
+                closeLedger(*app);
+                ++ledgerSeq;
 
-                SECTION("curr bucket")
-                {
-                    testIterReset(true);
-                }
+                LiveBucketInputIterator in(bucket());
 
-                SECTION("snap bucket")
-                {
-                    testIterReset(false);
-                }
+                // Check that iterator has reset to beginning of bucket and
+                // read meta entry + one additional entry
+                REQUIRE(evictionIter.bucketFileOffset ==
+                        metadataSize + xdr::xdr_size(*in) + xdrOverheadBytes);
+                REQUIRE(evictionIter.bucketListLevel == levelToTest);
+                REQUIRE(evictionIter.isCurrBucket == isCurr);
+            };
+
+            SECTION("curr bucket")
+            {
+                testIterReset(true);
             }
-        });
-    };
 
-    SECTION("legacy scan")
-    {
-        test(/*backgroundScan=*/false);
-    }
-    SECTION("background scan")
-    {
-        test(/*backgroundScan=*/true);
-    }
+            SECTION("snap bucket")
+            {
+                testIterReset(false);
+            }
+        }
+    });
 }
 
 TEST_CASE_VERSIONS("Searchable BucketListDB snapshots", "[bucketlist]")
 {
     VirtualClock clock;
     Config cfg(getTestConfig());
-    cfg.DEPRECATED_SQL_LEDGER_STATE = false;
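+    // The deprecated SQL ledger-state flag is gone; BucketListDB is
+    // now used unconditionally.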
 
     auto app = createTestApplication<BucketTestApplication>(clock, cfg);
     LedgerManagerForBucketTests& lm = app->getLedgerManager();
@@ -1276,7 +1573,7 @@ TEST_CASE_VERSIONS("Searchable BucketListDB snapshots", "[bucketlist]")
     entry.data.claimableBalance().amount = 0;
 
     auto searchableBL =
-        bm.getBucketSnapshotManager().copySearchableBucketListSnapshot();
+        bm.getBucketSnapshotManager().copySearchableLiveBucketListSnapshot();
 
     // Update entry every 5 ledgers so we can see bucket merge events
     for (auto ledgerSeq = 1; ledgerSeq < 101; ++ledgerSeq)
@@ -1343,30 +1640,30 @@ formatLedgerList(std::vector<uint32_t> const& ledgers)
 
 TEST_CASE("BucketList number dump", "[bucket][bucketlist][count][!hide]")
 {
-    for (uint32_t level = 0; level < BucketList::kNumLevels; ++level)
+    for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level)
     {
         CLOG_INFO(Bucket, "levelSize({}) = {} (formally)", level,
-                  formatU32(BucketList::levelSize(level)));
+                  formatU32(LiveBucketList::levelSize(level)));
     }
 
-    for (uint32_t level = 0; level < BucketList::kNumLevels; ++level)
+    for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level)
     {
         CLOG_INFO(Bucket, "levelHalf({}) = {} (formally)", level,
-                  formatU32(BucketList::levelHalf(level)));
+                  formatU32(LiveBucketList::levelHalf(level)));
     }
 
     for (uint32_t probe : {0x100, 0x10000, 0x1000000})
     {
-        for (uint32_t level = 0; level < BucketList::kNumLevels; ++level)
+        for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level)
         {
-            auto sz = formatU32(BucketList::sizeOfCurr(probe, level));
+            auto sz = formatU32(LiveBucketList::sizeOfCurr(probe, level));
             CLOG_INFO(Bucket, "sizeOfCurr({:#x}, {}) = {} (precisely)", probe,
                       level, sz);
         }
 
-        for (uint32_t level = 0; level < BucketList::kNumLevels; ++level)
+        for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level)
         {
-            auto sz = formatU32(BucketList::sizeOfSnap(probe, level));
+            auto sz = formatU32(LiveBucketList::sizeOfSnap(probe, level));
             CLOG_INFO(Bucket, "sizeOfSnap({:#x}, {}) = {} (precisely)", probe,
                       level, sz);
         }
@@ -1375,17 +1672,17 @@ TEST_CASE("BucketList number dump", "[bucket][bucketlist][count][!hide]")
     std::vector<std::vector<uint32_t>> spillEvents;
     std::vector<std::vector<uint32_t>> nonMergeCommitEvents;
     std::vector<std::vector<uint32_t>> mergeCommitEvents;
-    for (uint32_t level = 0; level < BucketList::kNumLevels; ++level)
+    for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level)
     {
         spillEvents.push_back({});
         nonMergeCommitEvents.push_back({});
         mergeCommitEvents.push_back({});
     }
-    for (uint32_t level = 0; level < BucketList::kNumLevels; ++level)
+    for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level)
     {
         for (uint32_t ledger = 0; ledger < 0x1000000; ++ledger)
         {
-            if (BucketList::levelShouldSpill(ledger, level))
+            if (LiveBucketList::levelShouldSpill(ledger, level))
             {
                 spillEvents[level].push_back(ledger);
                 if (spillEvents[level].size() > 5)
@@ -1393,11 +1690,12 @@ TEST_CASE("BucketList number dump", "[bucket][bucketlist][count][!hide]")
                     break;
                 }
             }
-            if (level != 0 && BucketList::levelShouldSpill(ledger, level - 1))
+            if (level != 0 &&
+                LiveBucketList::levelShouldSpill(ledger, level - 1))
             {
                 uint32_t nextChangeLedger =
-                    ledger + BucketList::levelHalf(level - 1);
-                if (BucketList::levelShouldSpill(nextChangeLedger, level))
+                    ledger + LiveBucketList::levelHalf(level - 1);
+                if (LiveBucketList::levelShouldSpill(nextChangeLedger, level))
                 {
                     nonMergeCommitEvents[level].push_back(ledger);
                 }
@@ -1408,17 +1706,17 @@ TEST_CASE("BucketList number dump", "[bucket][bucketlist][count][!hide]")
             }
         }
     }
-    for (uint32_t level = 0; level < BucketList::kNumLevels; ++level)
+    for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level)
     {
         auto ls = formatLedgerList(spillEvents[level]);
         CLOG_INFO(Bucket, "levelShouldSpill({:#x}) = true @ {}", level, ls);
     }
-    for (uint32_t level = 0; level < BucketList::kNumLevels; ++level)
+    for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level)
     {
         auto ls = formatLedgerList(mergeCommitEvents[level]);
         CLOG_INFO(Bucket, "mergeCommit({:#x}) @ {}", level, ls);
     }
-    for (uint32_t level = 0; level < BucketList::kNumLevels; ++level)
+    for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level)
     {
         auto ls = formatLedgerList(nonMergeCommitEvents[level]);
         CLOG_INFO(Bucket, "nonMergeCommit({:#x}) @ {}", level, ls);
@@ -1427,12 +1725,12 @@ TEST_CASE("BucketList number dump", "[bucket][bucketlist][count][!hide]")
     // Print out the full bucketlist at an arbitrarily-chosen probe ledger.
     uint32_t probe = 0x11f9ab;
     CLOG_INFO(Bucket, "BucketList state at {:#x}", probe);
-    for (uint32_t level = 0; level < BucketList::kNumLevels; ++level)
+    for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level)
     {
-        uint32_t currOld = BucketList::oldestLedgerInCurr(probe, level);
-        uint32_t snapOld = BucketList::oldestLedgerInSnap(probe, level);
-        uint32_t currSz = BucketList::sizeOfCurr(probe, level);
-        uint32_t snapSz = BucketList::sizeOfSnap(probe, level);
+        uint32_t currOld = LiveBucketList::oldestLedgerInCurr(probe, level);
+        uint32_t snapOld = LiveBucketList::oldestLedgerInSnap(probe, level);
+        uint32_t currSz = LiveBucketList::sizeOfCurr(probe, level);
+        uint32_t snapSz = LiveBucketList::sizeOfSnap(probe, level);
         uint32_t currNew = currOld + currSz - 1;
         uint32_t snapNew = snapOld + snapSz - 1;
         CLOG_INFO(
diff --git a/src/bucket/test/BucketManagerTests.cpp b/src/bucket/test/BucketManagerTests.cpp
index fd7653205b..c2f51c35b6 100644
--- a/src/bucket/test/BucketManagerTests.cpp
+++ b/src/bucket/test/BucketManagerTests.cpp
@@ -40,11 +40,11 @@ namespace BucketManagerTests
 {
 
 static void
-clearFutures(Application::pointer app, BucketList& bl)
+clearFutures(Application::pointer app, LiveBucketList& bl)
 {
 
     // First go through the BL and mop up all the FutureBuckets.
-    for (uint32_t i = 0; i < BucketList::kNumLevels; ++i)
+    for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i)
     {
         bl.getLevel(i).getNext().clear();
     }
@@ -54,13 +54,9 @@ clearFutures(Application::pointer app, BucketList& bl)
 
     size_t n = static_cast<size_t>(app->getConfig().WORKER_THREADS);
 
-    // If background eviction is enabled, we have one fewer worker thread for
-    // bucket merges
-    if (app->getConfig().isUsingBackgroundEviction())
-    {
-        releaseAssert(n != 0);
-        --n;
-    }
+    // Background eviction takes up one worker thread.
+    releaseAssert(n != 0);
+    --n;
 
     std::mutex mutex;
     std::condition_variable cv, cv2;
@@ -193,113 +189,90 @@ TEST_CASE("skip list", "[bucket][bucketmanager]")
 
 TEST_CASE_VERSIONS("bucketmanager ownership", "[bucket][bucketmanager]")
 {
-    auto test = [&](bool bucketListDB) {
-        VirtualClock clock;
-        Config cfg = getTestConfig();
-
-        // Make sure all Buckets serialize indexes to disk for test
-        cfg.BUCKETLIST_DB_INDEX_CUTOFF = 0;
-        cfg.MANUAL_CLOSE = false;
-
-        if (bucketListDB)
-        {
-            // Enable BucketListDB with persistent indexes
-            cfg.DEPRECATED_SQL_LEDGER_STATE = false;
-            cfg.NODE_IS_VALIDATOR = false;
-            cfg.FORCE_SCP = false;
-        }
-
-        for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) {
-            Application::pointer app = createTestApplication(clock, cfg);
-
-            std::vector<LedgerEntry> live(
-                LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions(
-                    {CONFIG_SETTING}, 10));
-            std::vector<LedgerKey> dead{};
-
-            std::shared_ptr<Bucket> b1;
-
-            {
-                std::shared_ptr<Bucket> b2 = Bucket::fresh(
-                    app->getBucketManager(), getAppLedgerVersion(app), {}, live,
-                    dead, /*countMergeEvents=*/true, clock.getIOContext(),
-                    /*doFsync=*/true);
-                b1 = b2;
-
-                // Bucket is referenced by b1, b2 and the BucketManager.
-                CHECK(b1.use_count() == 3);
-
-                std::shared_ptr<Bucket> b3 = Bucket::fresh(
-                    app->getBucketManager(), getAppLedgerVersion(app), {}, live,
-                    dead, /*countMergeEvents=*/true, clock.getIOContext(),
-                    /*doFsync=*/true);
-                std::shared_ptr<Bucket> b4 = Bucket::fresh(
-                    app->getBucketManager(), getAppLedgerVersion(app), {}, live,
-                    dead, /*countMergeEvents=*/true, clock.getIOContext(),
-                    /*doFsync=*/true);
-                // Bucket is referenced by b1, b2, b3, b4 and the BucketManager.
-                CHECK(b1.use_count() == 5);
-            }
-
-            // Take pointer by reference to not mess up use_count()
-            auto dropBucket = [&](std::shared_ptr<Bucket>& b) {
-                std::string filename = b->getFilename().string();
-                std::string indexFilename =
-                    app->getBucketManager().bucketIndexFilename(b->getHash());
-                CHECK(fs::exists(filename));
-                if (bucketListDB)
-                {
-                    CHECK(fs::exists(indexFilename));
-                }
+    VirtualClock clock;
+    Config cfg = getTestConfig();
 
-                b.reset();
-                app->getBucketManager().forgetUnreferencedBuckets();
-                CHECK(!fs::exists(filename));
-                CHECK(!fs::exists(indexFilename));
-            };
+    // Make sure all Buckets serialize indexes to disk for test
+    cfg.BUCKETLIST_DB_INDEX_CUTOFF = 0;
+    cfg.MANUAL_CLOSE = false;
 
-            // Bucket is now only referenced by b1 and the BucketManager.
-            CHECK(b1.use_count() == 2);
+    for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) {
+        Application::pointer app = createTestApplication(clock, cfg);
 
-            // Drop bucket ourselves then purge bucketManager.
-            dropBucket(b1);
+        std::vector<LedgerEntry> live(
+            LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions(
+                {CONFIG_SETTING}, 10));
+        std::vector<LedgerKey> dead{};
 
-            // Try adding a bucket to the BucketManager's bucketlist
-            auto& bl = app->getBucketManager().getBucketList();
-            bl.addBatch(*app, 1, getAppLedgerVersion(app), {}, live, dead);
-            clearFutures(app, bl);
-            b1 = bl.getLevel(0).getCurr();
+        std::shared_ptr<LiveBucket> b1;
 
-            // Bucket should be referenced by bucketlist itself, BucketManager
-            // cache and b1.
-            CHECK(b1.use_count() == 3);
+        {
+            std::shared_ptr<LiveBucket> b2 = LiveBucket::fresh(
+                app->getBucketManager(), getAppLedgerVersion(app), {}, live,
+                dead, /*countMergeEvents=*/true, clock.getIOContext(),
+                /*doFsync=*/true);
+            b1 = b2;
 
-            // This shouldn't change if we forget unreferenced buckets since
-            // it's referenced by bucketlist.
-            app->getBucketManager().forgetUnreferencedBuckets();
+            // Bucket is referenced by b1, b2 and the BucketManager.
             CHECK(b1.use_count() == 3);
 
-            // But if we mutate the curr bucket of the bucketlist, it should.
-            live[0] = LedgerTestUtils::generateValidLedgerEntryWithExclusions(
-                {CONFIG_SETTING});
-            bl.addBatch(*app, 1, getAppLedgerVersion(app), {}, live, dead);
-            clearFutures(app, bl);
-            CHECK(b1.use_count() == 2);
-
-            // Drop it again.
-            dropBucket(b1);
-        });
-    };
+            std::shared_ptr<LiveBucket> b3 = LiveBucket::fresh(
+                app->getBucketManager(), getAppLedgerVersion(app), {}, live,
+                dead, /*countMergeEvents=*/true, clock.getIOContext(),
+                /*doFsync=*/true);
+            std::shared_ptr<LiveBucket> b4 = LiveBucket::fresh(
+                app->getBucketManager(), getAppLedgerVersion(app), {}, live,
+                dead, /*countMergeEvents=*/true, clock.getIOContext(),
+                /*doFsync=*/true);
+            // Bucket is referenced by b1, b2, b3, b4 and the BucketManager.
+            CHECK(b1.use_count() == 5);
+        }
 
-    SECTION("BucketListDB")
-    {
-        test(true);
-    }
+        // Take pointer by reference to not mess up use_count()
+        auto dropBucket = [&](std::shared_ptr<LiveBucket>& b) {
+            std::string filename = b->getFilename().string();
+            std::string indexFilename =
+                app->getBucketManager().bucketIndexFilename(b->getHash());
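+            // Indexes are now persisted unconditionally, so the index
+            // file should always exist alongside the bucket file.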
+            CHECK(fs::exists(filename));
+            CHECK(fs::exists(indexFilename));
 
-    SECTION("SQL")
-    {
-        test(false);
-    }
+            b.reset();
+            app->getBucketManager().forgetUnreferencedBuckets();
+            CHECK(!fs::exists(filename));
+            CHECK(!fs::exists(indexFilename));
+        };
+
+        // Bucket is now only referenced by b1 and the BucketManager.
+        CHECK(b1.use_count() == 2);
+
+        // Drop bucket ourselves then purge bucketManager.
+        dropBucket(b1);
+
+        // Try adding a bucket to the BucketManager's bucketlist
+        auto& bl = app->getBucketManager().getLiveBucketList();
+        bl.addBatch(*app, 1, getAppLedgerVersion(app), {}, live, dead);
+        clearFutures(app, bl);
+        b1 = bl.getLevel(0).getCurr();
+
+        // Bucket should be referenced by bucketlist itself, BucketManager
+        // cache and b1.
+        CHECK(b1.use_count() == 3);
+
+        // This shouldn't change if we forget unreferenced buckets since
+        // it's referenced by bucketlist.
+        app->getBucketManager().forgetUnreferencedBuckets();
+        CHECK(b1.use_count() == 3);
+
+        // But if we mutate the curr bucket of the bucketlist, it should.
+        live[0] = LedgerTestUtils::generateValidLedgerEntryWithExclusions(
+            {CONFIG_SETTING});
+        bl.addBatch(*app, 1, getAppLedgerVersion(app), {}, live, dead);
+        clearFutures(app, bl);
+        CHECK(b1.use_count() == 2);
+
+        // Drop it again.
+        dropBucket(b1);
+    });
 }
 
 TEST_CASE("bucketmanager missing buckets fail", "[bucket][bucketmanager]")
@@ -310,7 +283,7 @@ TEST_CASE("bucketmanager missing buckets fail", "[bucket][bucketmanager]")
         VirtualClock clock;
         auto app = createTestApplication<BucketTestApplication>(clock, cfg);
         BucketManager& bm = app->getBucketManager();
-        BucketList& bl = bm.getBucketList();
+        LiveBucketList& bl = bm.getLiveBucketList();
         LedgerManagerForBucketTests& lm = app->getLedgerManager();
 
         uint32_t ledger = 0;
@@ -324,7 +297,7 @@ TEST_CASE("bucketmanager missing buckets fail", "[bucket][bucketmanager]")
                     {CONFIG_SETTING}, 10),
                 {});
             closeLedger(*app);
-        } while (!BucketList::levelShouldSpill(ledger, level - 1));
+        } while (!LiveBucketList::levelShouldSpill(ledger, level - 1));
         auto someBucket = bl.getLevel(1).getCurr();
         someBucketFileName = someBucket->getFilename().string();
     }
@@ -353,7 +326,7 @@ TEST_CASE_VERSIONS("bucketmanager reattach to finished merge",
         Application::pointer app = createTestApplication(clock, cfg);
 
         BucketManager& bm = app->getBucketManager();
-        BucketList& bl = bm.getBucketList();
+        LiveBucketList& bl = bm.getLiveBucketList();
         auto vers = getAppLedgerVersion(app);
 
         // Add some entries to get to a nontrivial merge-state.
@@ -365,13 +338,13 @@ TEST_CASE_VERSIONS("bucketmanager reattach to finished merge",
             auto lh =
                 app->getLedgerManager().getLastClosedLedgerHeader().header;
             lh.ledgerSeq = ledger;
-            addBatchAndUpdateSnapshot(
-                bl, *app, lh, {},
+            addLiveBatchAndUpdateSnapshot(
+                *app, lh, {},
                 LedgerTestUtils::generateValidLedgerEntriesWithExclusions(
                     {CONFIG_SETTING}, 10),
                 {});
             bm.forgetUnreferencedBuckets();
-        } while (!BucketList::levelShouldSpill(ledger, level - 1));
+        } while (!LiveBucketList::levelShouldSpill(ledger, level - 1));
 
         // Check that the merge on level isn't committed (we're in
         // ARTIFICIALLY_PESSIMIZE_MERGES_FOR_TESTING mode that does not resolve
@@ -396,7 +369,7 @@ TEST_CASE_VERSIONS("bucketmanager reattach to finished merge",
 
         // Reattach to _finished_ merge future on level.
         has2.currentBuckets[level].next.makeLive(
-            *app, vers, BucketList::keepDeadEntries(level));
+            *app, vers, LiveBucketList::keepTombstoneEntries(level));
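+        // keepDeadEntries was renamed to keepTombstoneEntries as part
+        // of the Live/HotArchive bucket split.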
         REQUIRE(has2.currentBuckets[level].next.isMerging());
 
         // Resolve reattached future.
@@ -420,7 +393,7 @@ TEST_CASE_VERSIONS("bucketmanager reattach to running merge",
         Application::pointer app = createTestApplication(clock, cfg);
 
         BucketManager& bm = app->getBucketManager();
-        BucketList& bl = bm.getBucketList();
+        LiveBucketList& bl = bm.getLiveBucketList();
         auto vers = getAppLedgerVersion(app);
 
         // This test is a race that will (if all goes well) eventually be won:
@@ -454,8 +427,8 @@ TEST_CASE_VERSIONS("bucketmanager reattach to running merge",
             auto lh =
                 app->getLedgerManager().getLastClosedLedgerHeader().header;
             lh.ledgerSeq = ledger;
-            addBatchAndUpdateSnapshot(
-                bl, *app, lh, {},
+            addLiveBatchAndUpdateSnapshot(
+                *app, lh, {},
                 LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions(
                     {CONFIG_SETTING}, 100),
                 {});
@@ -473,12 +446,14 @@ TEST_CASE_VERSIONS("bucketmanager reattach to running merge",
             // win quite shortly).
             HistoryArchiveState has2;
             has2.fromString(serialHas);
-            for (uint32_t level = 0; level < BucketList::kNumLevels; ++level)
+            for (uint32_t level = 0; level < LiveBucketList::kNumLevels;
+                 ++level)
             {
                 if (has2.currentBuckets[level].next.hasHashes())
                 {
                     has2.currentBuckets[level].next.makeLive(
-                        *app, vers, BucketList::keepDeadEntries(level));
+                        *app, vers,
+                        LiveBucketList::keepTombstoneEntries(level));
                 }
             }
         }
@@ -499,17 +474,17 @@ TEST_CASE("bucketmanager do not leak empty-merge futures",
     // are thereby not leaking. Disable BucketListDB so that snapshots do not
     // hold persistent buckets, complicating bucket counting.
     VirtualClock clock;
-    Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+    Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY));
     cfg.ARTIFICIALLY_PESSIMIZE_MERGES_FOR_TESTING = true;
     cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION =
         static_cast<uint32_t>(
-            Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) -
+            LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) -
         1;
 
     auto app = createTestApplication<BucketTestApplication>(clock, cfg);
 
     BucketManager& bm = app->getBucketManager();
-    BucketList& bl = bm.getBucketList();
+    LiveBucketList& bl = bm.getLiveBucketList();
     LedgerManagerForBucketTests& lm = app->getLedgerManager();
 
     // We create 8 live ledger entries spread across 8 ledgers then add a ledger
@@ -578,8 +553,6 @@ TEST_CASE_VERSIONS(
         auto vers = getAppLedgerVersion(app);
         auto& hm = app->getHistoryManager();
         auto& bm = app->getBucketManager();
-        auto& bl = bm.getBucketList();
-        auto& lm = app->getLedgerManager();
         hm.setPublicationEnabled(false);
         app->getHistoryArchiveManager().initializeHistoryArchive(
             tcfg.getArchiveDirName());
@@ -595,8 +568,8 @@ TEST_CASE_VERSIONS(
             auto lh =
                 app->getLedgerManager().getLastClosedLedgerHeader().header;
             lh.ledgerSeq++;
-            addBatchAndUpdateSnapshot(
-                bl, *app, lh, {},
+            addLiveBatchAndUpdateSnapshot(
+                *app, lh, {},
                 LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions(
                     {CONFIG_SETTING}, 100),
                 {});
@@ -619,7 +592,7 @@ TEST_CASE_VERSIONS(
 
         auto ra = bm.readMergeCounters().mFinishedMergeReattachments;
         if (protocolVersionIsBefore(vers,
-                                    Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
+                                    LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
         {
             // Versions prior to FIRST_PROTOCOL_SHADOWS_REMOVED re-attach to
             // finished merges
@@ -682,9 +655,9 @@ TEST_CASE_VERSIONS(
 class StopAndRestartBucketMergesTest
 {
     static void
-    resolveAllMerges(BucketList& bl)
+    resolveAllMerges(LiveBucketList& bl)
     {
-        for (uint32 i = 0; i < BucketList::kNumLevels; ++i)
+        for (uint32 i = 0; i < LiveBucketList::kNumLevels; ++i)
         {
             auto& level = bl.getLevel(i);
             auto& next = level.getNext();
@@ -770,8 +743,8 @@ class StopAndRestartBucketMergesTest
         checkSensiblePostInitEntryMergeCounters(uint32_t protocol) const
         {
             CHECK(mMergeCounters.mPostInitEntryProtocolMerges != 0);
-            if (protocolVersionIsBefore(protocol,
-                                        Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
+            if (protocolVersionIsBefore(
+                    protocol, LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
             {
                 CHECK(mMergeCounters.mPostShadowRemovalProtocolMerges == 0);
             }
@@ -797,8 +770,8 @@ class StopAndRestartBucketMergesTest
             CHECK(mMergeCounters.mOldInitEntriesMergedWithNewDead != 0);
             CHECK(mMergeCounters.mNewEntriesMergedWithOldNeitherInit != 0);
 
-            if (protocolVersionIsBefore(protocol,
-                                        Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
+            if (protocolVersionIsBefore(
+                    protocol, LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
             {
                 CHECK(mMergeCounters.mShadowScanSteps != 0);
                 CHECK(mMergeCounters.mLiveEntryShadowElisions != 0);
@@ -933,14 +906,14 @@ class StopAndRestartBucketMergesTest
         {
             LedgerManager& lm = app.getLedgerManager();
             BucketManager& bm = app.getBucketManager();
-            BucketList& bl = bm.getBucketList();
+            LiveBucketList& bl = bm.getLiveBucketList();
             // Complete those merges we're about to inspect.
             resolveAllMerges(bl);
 
             mMergeCounters = bm.readMergeCounters();
             mLedgerHeaderHash = lm.getLastClosedLedgerHeader().hash;
             mBucketListHash = bl.getHash();
-            BucketLevel& blv = bl.getLevel(level);
+            BucketLevel<LiveBucket>& blv = bl.getLevel(level);
             mCurrBucketHash = blv.getCurr()->getHash();
             mSnapBucketHash = blv.getSnap()->getHash();
         }
@@ -959,13 +932,13 @@ class StopAndRestartBucketMergesTest
     collectLedgerEntries(Application& app,
                          std::map<LedgerKey, LedgerEntry>& entries)
     {
-        auto bl = app.getBucketManager().getBucketList();
-        for (uint32_t i = BucketList::kNumLevels; i > 0; --i)
+        auto bl = app.getBucketManager().getLiveBucketList();
+        for (uint32_t i = LiveBucketList::kNumLevels; i > 0; --i)
         {
-            BucketLevel const& level = bl.getLevel(i - 1);
+            BucketLevel<LiveBucket> const& level = bl.getLevel(i - 1);
             for (auto bucket : {level.getSnap(), level.getCurr()})
             {
-                for (BucketInputIterator bi(bucket); bi; ++bi)
+                for (LiveBucketInputIterator bi(bucket); bi; ++bi)
                 {
                     BucketEntry const& e = *bi;
                     if (e.type() == LIVEENTRY || e.type() == INITENTRY)
@@ -1008,10 +981,11 @@ class StopAndRestartBucketMergesTest
     void
     calculateDesignatedLedgers()
     {
-        uint32_t spillFreq = BucketList::levelHalf(mDesignatedLevel);
-        uint32_t prepFreq = (mDesignatedLevel == 0
-                                 ? 1
-                                 : BucketList::levelHalf(mDesignatedLevel - 1));
+        uint32_t spillFreq = LiveBucketList::levelHalf(mDesignatedLevel);
+        uint32_t prepFreq =
+            (mDesignatedLevel == 0
+                 ? 1
+                 : LiveBucketList::levelHalf(mDesignatedLevel - 1));
 
         uint32_t const SPILLCOUNT = 5;
         uint32_t const PREPCOUNT = 5;
@@ -1215,7 +1189,7 @@ class StopAndRestartBucketMergesTest
             lm.setNextLedgerEntryBatchForBucketTesting(
                 mInitEntryBatches[i - 2], mLiveEntryBatches[i - 2],
                 mDeadEntryBatches[i - 2]);
-            resolveAllMerges(app->getBucketManager().getBucketList());
+            resolveAllMerges(app->getBucketManager().getLiveBucketList());
             auto countersBeforeClose =
                 app->getBucketManager().readMergeCounters();
 
@@ -1243,13 +1217,15 @@ class StopAndRestartBucketMergesTest
             auto j = mControlSurveys.find(i);
             if (j != mControlSurveys.end())
             {
-                if (BucketList::levelShouldSpill(i, mDesignatedLevel - 1))
+                if (LiveBucketList::levelShouldSpill(i, mDesignatedLevel - 1))
                 {
                     // Confirm that there's a merge-in-progress at this level
                     // (closing ledger i should have provoked a spill from
                     // mDesignatedLevel-1 to mDesignatedLevel)
-                    BucketList& bl = app->getBucketManager().getBucketList();
-                    BucketLevel& blv = bl.getLevel(mDesignatedLevel);
+                    LiveBucketList& bl =
+                        app->getBucketManager().getLiveBucketList();
+                    BucketLevel<LiveBucket>& blv =
+                        bl.getLevel(mDesignatedLevel);
                     REQUIRE(blv.getNext().isMerging());
                 }
 
@@ -1277,11 +1253,13 @@ class StopAndRestartBucketMergesTest
                 clock = std::make_unique<VirtualClock>();
                 app = createTestApplication<BucketTestApplication>(*clock, cfg,
                                                                    false);
-                if (BucketList::levelShouldSpill(i, mDesignatedLevel - 1))
+                if (LiveBucketList::levelShouldSpill(i, mDesignatedLevel - 1))
                 {
                     // Confirm that the merge-in-progress was restarted.
-                    BucketList& bl = app->getBucketManager().getBucketList();
-                    BucketLevel& blv = bl.getLevel(mDesignatedLevel);
+                    LiveBucketList& bl =
+                        app->getBucketManager().getLiveBucketList();
+                    BucketLevel<LiveBucket>& blv =
+                        bl.getLevel(mDesignatedLevel);
                     REQUIRE(blv.getNext().isMerging());
                 }
 
@@ -1315,7 +1293,7 @@ class StopAndRestartBucketMergesTest
         assert(!mControlSurveys.empty());
         if (protocolVersionStartsFrom(
                 mProtocol,
-                Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY))
+                LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY))
         {
             mControlSurveys.rbegin()->second.dumpMergeCounters(
                 "control, Post-INITENTRY", mDesignatedLevel);
@@ -1339,11 +1317,11 @@ TEST_CASE("bucket persistence over app restart with initentry",
 {
     for (uint32_t protocol :
          {static_cast<uint32_t>(
-              Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) -
+              LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) -
               1,
           static_cast<uint32_t>(
-              Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY),
-          static_cast<uint32_t>(Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED)})
+              LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY),
+          static_cast<uint32_t>(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)})
     {
         for (uint32_t level : {2, 3})
         {
@@ -1359,11 +1337,11 @@ TEST_CASE("bucket persistence over app restart with initentry - extended",
 {
     for (uint32_t protocol :
          {static_cast<uint32_t>(
-              Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) -
+              LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) -
               1,
           static_cast<uint32_t>(
-              Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY),
-          static_cast<uint32_t>(Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED)})
+              LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY),
+          static_cast<uint32_t>(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)})
     {
         for (uint32_t level : {2, 3, 4, 5})
         {
@@ -1418,7 +1396,7 @@ TEST_CASE_VERSIONS("bucket persistence over app restart",
             VirtualClock clock;
             Application::pointer app = createTestApplication(clock, cfg0);
             sk = std::make_optional<SecretKey>(cfg0.NODE_SEED);
-            BucketList& bl = app->getBucketManager().getBucketList();
+            LiveBucketList& bl = app->getBucketManager().getLiveBucketList();
 
             uint32_t i = 2;
             while (i < pause)
@@ -1453,7 +1431,7 @@ TEST_CASE_VERSIONS("bucket persistence over app restart",
         {
             VirtualClock clock;
             Application::pointer app = createTestApplication(clock, cfg1);
-            BucketList& bl = app->getBucketManager().getBucketList();
+            LiveBucketList& bl = app->getBucketManager().getLiveBucketList();
 
             uint32_t i = 2;
             while (i < pause)
@@ -1480,7 +1458,7 @@ TEST_CASE_VERSIONS("bucket persistence over app restart",
             VirtualClock clock;
             Application::pointer app = Application::create(clock, cfg1, false);
             app->start();
-            BucketList& bl = app->getBucketManager().getBucketList();
+            LiveBucketList& bl = app->getBucketManager().getLiveBucketList();
 
             // Confirm that we re-acquired the close-ledger state.
             REQUIRE(
diff --git a/src/bucket/test/BucketMergeMapTests.cpp b/src/bucket/test/BucketMergeMapTests.cpp
index b5f9ea81a8..c5883e1c82 100644
--- a/src/bucket/test/BucketMergeMapTests.cpp
+++ b/src/bucket/test/BucketMergeMapTests.cpp
@@ -22,7 +22,7 @@ TEST_CASE("bucket merge map", "[bucket][bucketmergemap]")
         std::vector<LedgerEntry> live =
             LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions(
                 {CONFIG_SETTING}, numEntries);
-        std::shared_ptr<Bucket> b1 = Bucket::fresh(
+        std::shared_ptr<LiveBucket> b1 = LiveBucket::fresh(
             app->getBucketManager(), BucketTestUtils::getAppLedgerVersion(app),
             {}, live, {},
             /*countMergeEvents=*/true, clock.getIOContext(),
@@ -30,41 +30,44 @@ TEST_CASE("bucket merge map", "[bucket][bucketmergemap]")
         return b1;
     };
 
-    std::shared_ptr<Bucket> in1a = getValidBucket();
-    std::shared_ptr<Bucket> in1b = getValidBucket();
-    std::shared_ptr<Bucket> in1c = getValidBucket();
+    std::shared_ptr<LiveBucket> in1a = getValidBucket();
+    std::shared_ptr<LiveBucket> in1b = getValidBucket();
+    std::shared_ptr<LiveBucket> in1c = getValidBucket();
 
-    std::shared_ptr<Bucket> in2a = getValidBucket();
-    std::shared_ptr<Bucket> in2b = getValidBucket();
-    std::shared_ptr<Bucket> in2c = getValidBucket();
+    std::shared_ptr<LiveBucket> in2a = getValidBucket();
+    std::shared_ptr<LiveBucket> in2b = getValidBucket();
+    std::shared_ptr<LiveBucket> in2c = getValidBucket();
 
-    std::shared_ptr<Bucket> in3a = getValidBucket();
-    std::shared_ptr<Bucket> in3b = getValidBucket();
-    std::shared_ptr<Bucket> in3c = getValidBucket();
-    std::shared_ptr<Bucket> in3d = getValidBucket();
+    std::shared_ptr<LiveBucket> in3a = getValidBucket();
+    std::shared_ptr<LiveBucket> in3b = getValidBucket();
+    std::shared_ptr<LiveBucket> in3c = getValidBucket();
+    std::shared_ptr<LiveBucket> in3d = getValidBucket();
 
-    std::shared_ptr<Bucket> in4a = getValidBucket();
-    std::shared_ptr<Bucket> in4b = getValidBucket();
+    std::shared_ptr<LiveBucket> in4a = getValidBucket();
+    std::shared_ptr<LiveBucket> in4b = getValidBucket();
 
-    std::shared_ptr<Bucket> in5a = getValidBucket();
-    std::shared_ptr<Bucket> in5b = getValidBucket();
+    std::shared_ptr<LiveBucket> in5a = getValidBucket();
+    std::shared_ptr<LiveBucket> in5b = getValidBucket();
 
-    std::shared_ptr<Bucket> in6a = getValidBucket();
-    std::shared_ptr<Bucket> in6b = getValidBucket();
+    std::shared_ptr<LiveBucket> in6a = getValidBucket();
+    std::shared_ptr<LiveBucket> in6b = getValidBucket();
 
-    std::shared_ptr<Bucket> out1 = getValidBucket();
-    std::shared_ptr<Bucket> out2 = getValidBucket();
-    std::shared_ptr<Bucket> out4 = getValidBucket();
-    std::shared_ptr<Bucket> out6 = getValidBucket();
+    std::shared_ptr<LiveBucket> out1 = getValidBucket();
+    std::shared_ptr<LiveBucket> out2 = getValidBucket();
+    std::shared_ptr<LiveBucket> out4 = getValidBucket();
+    std::shared_ptr<LiveBucket> out6 = getValidBucket();
 
     BucketMergeMap bmm;
 
-    MergeKey m1{true, in1a, in1b, {in1c}};
-    MergeKey m2{true, in2a, in2b, {in2c}};
-    MergeKey m3{true, in3a, in3b, {in3c, in3d}};
-    MergeKey m4{true, in4a, in4b, {}};
-    MergeKey m5{true, in5a, in5b, {}};
-    MergeKey m6{true, in6a, in6b, {in1a}};
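+    // MergeKey now stores bucket hashes rather than shared_ptrs, so
+    // the merge map no longer holds references to the input buckets.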
+    MergeKey m1{true, in1a->getHash(), in1b->getHash(), {in1c->getHash()}};
+    MergeKey m2{true, in2a->getHash(), in2b->getHash(), {in2c->getHash()}};
+    MergeKey m3{true,
+                in3a->getHash(),
+                in3b->getHash(),
+                {in3c->getHash(), in3d->getHash()}};
+    MergeKey m4{true, in4a->getHash(), in4b->getHash(), {}};
+    MergeKey m5{true, in5a->getHash(), in5b->getHash(), {}};
+    MergeKey m6{true, in6a->getHash(), in6b->getHash(), {in1a->getHash()}};
 
     bmm.recordMerge(m1, out1->getHash());
     bmm.recordMerge(m2, out2->getHash());
diff --git a/src/bucket/test/BucketTestUtils.cpp b/src/bucket/test/BucketTestUtils.cpp
index 0200e49442..873e237cb3 100644
--- a/src/bucket/test/BucketTestUtils.cpp
+++ b/src/bucket/test/BucketTestUtils.cpp
@@ -3,6 +3,7 @@
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
 
 #include "BucketTestUtils.h"
+#include "bucket/Bucket.h"
 #include "bucket/BucketInputIterator.h"
 #include "bucket/BucketManager.h"
 #include "crypto/Hex.h"
@@ -10,6 +11,8 @@
 #include "ledger/LedgerTxn.h"
 #include "main/Application.h"
 #include "test/test.h"
+#include "xdr/Stellar-ledger.h"
+#include <memory>
 
 namespace stellar
 {
@@ -30,18 +33,43 @@ getAppLedgerVersion(Application::pointer app)
 }
 
 void
-addBatchAndUpdateSnapshot(BucketList& bl, Application& app, LedgerHeader header,
-                          std::vector<LedgerEntry> const& initEntries,
-                          std::vector<LedgerEntry> const& liveEntries,
-                          std::vector<LedgerKey> const& deadEntries)
+addLiveBatchAndUpdateSnapshot(Application& app, LedgerHeader header,
+                              std::vector<LedgerEntry> const& initEntries,
+                              std::vector<LedgerEntry> const& liveEntries,
+                              std::vector<LedgerKey> const& deadEntries)
 {
-    bl.addBatch(app, header.ledgerSeq, header.ledgerVersion, initEntries,
-                liveEntries, deadEntries);
-    if (app.getConfig().isUsingBucketListDB())
-    {
-        app.getBucketManager().getBucketSnapshotManager().updateCurrentSnapshot(
-            std::make_unique<BucketListSnapshot>(bl, header));
-    }
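+    // Apply the batch to the live BucketList, then hand the snapshot
+    // manager fresh snapshots of both the live and hot archive lists.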
+    auto& liveBl = app.getBucketManager().getLiveBucketList();
+    liveBl.addBatch(app, header.ledgerSeq, header.ledgerVersion, initEntries,
+                    liveEntries, deadEntries);
+
+    auto liveSnapshot =
+        std::make_unique<BucketListSnapshot<LiveBucket>>(liveBl, header);
+    auto hotArchiveSnapshot =
+        std::make_unique<BucketListSnapshot<HotArchiveBucket>>(
+            app.getBucketManager().getHotArchiveBucketList(), header);
+
+    app.getBucketManager().getBucketSnapshotManager().updateCurrentSnapshot(
+        std::move(liveSnapshot), std::move(hotArchiveSnapshot));
+}
+
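+// Hot-archive analogue of addLiveBatchAndUpdateSnapshot: applies a batch to
+// the hot archive BucketList, then republishes snapshots of both lists.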
+void
+addHotArchiveBatchAndUpdateSnapshot(
+    Application& app, LedgerHeader header,
+    std::vector<LedgerEntry> const& archiveEntries,
+    std::vector<LedgerKey> const& restoredEntries,
+    std::vector<LedgerKey> const& deletedEntries)
+{
+    auto& hotArchiveBl = app.getBucketManager().getHotArchiveBucketList();
+    hotArchiveBl.addBatch(app, header.ledgerSeq, header.ledgerVersion,
+                          archiveEntries, restoredEntries, deletedEntries);
+    auto liveSnapshot = std::make_unique<BucketListSnapshot<LiveBucket>>(
+        app.getBucketManager().getLiveBucketList(), header);
+    auto hotArchiveSnapshot =
+        std::make_unique<BucketListSnapshot<HotArchiveBucket>>(hotArchiveBl,
+                                                               header);
+
+    app.getBucketManager().getBucketSnapshotManager().updateCurrentSnapshot(
+        std::move(liveSnapshot), std::move(hotArchiveSnapshot));
 }
 
 void
@@ -50,21 +78,14 @@ for_versions_with_differing_bucket_logic(
 {
     for_versions(
         {static_cast<uint32_t>(
-             Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) -
+             LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) -
              1,
          static_cast<uint32_t>(
-             Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY),
-         static_cast<uint32_t>(Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED)},
+             LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY),
+         static_cast<uint32_t>(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED)},
         cfg, f);
 }
 
-size_t
-countEntries(std::shared_ptr<Bucket> bucket)
-{
-    EntryCounts e(bucket);
-    return e.sum();
-}
-
 Hash
 closeLedger(Application& app, std::optional<SecretKey> skToSignValue,
             xdr::xvector<UpgradeType, 6> upgrades)
@@ -74,7 +95,7 @@ closeLedger(Application& app, std::optional<SecretKey> skToSignValue,
     uint32_t ledgerNum = lcl.header.ledgerSeq + 1;
     CLOG_INFO(Bucket, "Artificially closing ledger {} with lcl={}, buckets={}",
               ledgerNum, hexAbbrev(lcl.hash),
-              hexAbbrev(app.getBucketManager().getBucketList().getHash()));
+              hexAbbrev(app.getBucketManager().getLiveBucketList().getHash()));
     app.getHerder().externalizeValue(TxSetXDRFrame::makeEmpty(lcl), ledgerNum,
                                      lcl.header.scpValue.closeTime, upgrades,
                                      skToSignValue);
@@ -87,9 +108,10 @@ closeLedger(Application& app)
     return closeLedger(app, std::nullopt);
 }
 
-EntryCounts::EntryCounts(std::shared_ptr<Bucket> bucket)
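+// Live buckets: INITENTRY counts as nInitOrArchived, LIVEENTRY as
+// nLiveOrHash, DEADENTRY as nDead.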
+template <>
+EntryCounts<LiveBucket>::EntryCounts(std::shared_ptr<LiveBucket> bucket)
 {
-    BucketInputIterator iter(bucket);
+    LiveBucketInputIterator iter(bucket);
     if (iter.seenMetadata())
     {
         ++nMeta;
@@ -99,10 +121,10 @@ EntryCounts::EntryCounts(std::shared_ptr<Bucket> bucket)
         switch ((*iter).type())
         {
         case INITENTRY:
-            ++nInit;
+            ++nInitOrArchived;
             break;
         case LIVEENTRY:
-            ++nLive;
+            ++nLiveOrHash;
             break;
         case DEADENTRY:
             ++nDead;
@@ -116,6 +138,83 @@ EntryCounts::EntryCounts(std::shared_ptr<Bucket> bucket)
     }
 }
 
+template <>
+EntryCounts<HotArchiveBucket>::EntryCounts(
+    std::shared_ptr<HotArchiveBucket> bucket)
+{
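+    // Hot archive buckets reuse the shared counters: ARCHIVED entries count
+    // as nInitOrArchived, LIVE as nLiveOrHash, DELETED as nDead.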
+    HotArchiveBucketInputIterator iter(bucket);
+    if (iter.seenMetadata())
+    {
+        ++nMeta;
+    }
+    while (iter)
+    {
+        switch ((*iter).type())
+        {
+        case HOT_ARCHIVE_ARCHIVED:
+            ++nInitOrArchived;
+            break;
+        case HOT_ARCHIVE_LIVE:
+            ++nLiveOrHash;
+            break;
+        case HOT_ARCHIVE_DELETED:
+            ++nDead;
+            break;
+        case HOT_ARCHIVE_METAENTRY:
+            // This should never happen: only the first record can be METAENTRY
+            // and it is counted above.
+            abort();
+        }
+        ++iter;
+    }
+}
+
+template <>
+EntryCounts<ColdArchiveBucket>::EntryCounts(
+    std::shared_ptr<ColdArchiveBucket> bucket)
+{
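+    // Cold archive buckets: ARCHIVED_LEAF -> nInitOrArchived, HASH ->
+    // nLiveOrHash, DELETED_LEAF -> nDead, BOUNDARY_LEAF -> nMeta.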
+    ColdArchiveBucketInputIterator iter(bucket);
+    if (iter.seenMetadata())
+    {
+        ++nMeta;
+    }
+    while (iter)
+    {
+        switch ((*iter).type())
+        {
+        case COLD_ARCHIVE_ARCHIVED_LEAF:
+            ++nInitOrArchived;
+            break;
+        case COLD_ARCHIVE_DELETED_LEAF:
+            ++nDead;
+            break;
+        case COLD_ARCHIVE_HASH:
+            ++nLiveOrHash;
+            break;
+        case COLD_ARCHIVE_BOUNDARY_LEAF:
+            ++nMeta;
+            break;
+        case COLD_ARCHIVE_METAENTRY:
+            // This should never happen: only the first record can be METAENTRY
+            // and it is counted above.
+            abort();
+        }
+        ++iter;
+    }
+}
+
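+// Templated so tests can count entries in any bucket kind; the explicit
+// instantiations below keep the definition out of the header.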
+template <class BucketT>
+size_t
+countEntries(std::shared_ptr<BucketT> bucket)
+{
+    EntryCounts e(bucket);
+    return e.sum();
+}
+
+template size_t countEntries(std::shared_ptr<LiveBucket> bucket);
+template size_t countEntries(std::shared_ptr<HotArchiveBucket> bucket);
+template size_t countEntries(std::shared_ptr<ColdArchiveBucket> bucket);
+
 void
 LedgerManagerForBucketTests::transferLedgerEntriesToBucketList(
     AbstractLedgerTxn& ltx,
@@ -163,16 +262,8 @@ LedgerManagerForBucketTests::transferLedgerEntriesToBucketList(
                 }
 
                 LedgerTxn ltxEvictions(ltx);
-                if (mApp.getConfig().isUsingBackgroundEviction())
-                {
-                    mApp.getBucketManager().resolveBackgroundEvictionScan(
-                        ltxEvictions, lh.ledgerSeq, keys);
-                }
-                else
-                {
-                    mApp.getBucketManager().scanForEvictionLegacy(ltxEvictions,
-                                                                  lh.ledgerSeq);
-                }
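+                // Eviction now always uses the background scan; the legacy
+                // foreground path was removed.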
+                mApp.getBucketManager().resolveBackgroundEvictionScan(
+                    ltxEvictions, lh.ledgerSeq, keys);
 
                 if (ledgerCloseMeta)
                 {
@@ -191,8 +282,7 @@ LedgerManagerForBucketTests::transferLedgerEntriesToBucketList(
         // Add dead entries from ltx to entries that will be added to BucketList
         // so we can test background eviction properly
         if (protocolVersionStartsFrom(initialLedgerVers,
-                                      SOROBAN_PROTOCOL_VERSION) &&
-            mApp.getConfig().isUsingBackgroundEviction())
+                                      SOROBAN_PROTOCOL_VERSION))
         {
             for (auto const& k : dead)
             {
@@ -201,8 +291,8 @@ LedgerManagerForBucketTests::transferLedgerEntriesToBucketList(
         }
 
         // Use the testing values.
-        mApp.getBucketManager().addBatch(mApp, lh, mTestInitEntries,
-                                         mTestLiveEntries, mTestDeadEntries);
+        mApp.getBucketManager().addLiveBatch(
+            mApp, lh, mTestInitEntries, mTestLiveEntries, mTestDeadEntries);
         mUseTestEntries = false;
     }
     else
diff --git a/src/bucket/test/BucketTestUtils.h b/src/bucket/test/BucketTestUtils.h
index eddd4ae95b..e84d9ff8eb 100644
--- a/src/bucket/test/BucketTestUtils.h
+++ b/src/bucket/test/BucketTestUtils.h
@@ -5,17 +5,23 @@
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
 
 #include "test/TestUtils.h"
+#include "xdr/Stellar-ledger.h"
 
 namespace stellar
 {
 namespace BucketTestUtils
 {
 
-void addBatchAndUpdateSnapshot(BucketList& bl, Application& app,
-                               LedgerHeader header,
-                               std::vector<LedgerEntry> const& initEntries,
-                               std::vector<LedgerEntry> const& liveEntries,
-                               std::vector<LedgerKey> const& deadEntries);
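+// Test helpers that add a batch to the corresponding BucketList and refresh
+// the BucketSnapshotManager; defined in BucketTestUtils.cpp.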
+void addLiveBatchAndUpdateSnapshot(Application& app, LedgerHeader header,
+                                   std::vector<LedgerEntry> const& initEntries,
+                                   std::vector<LedgerEntry> const& liveEntries,
+                                   std::vector<LedgerKey> const& deadEntries);
+
+void addHotArchiveBatchAndUpdateSnapshot(
+    Application& app, LedgerHeader header,
+    std::vector<LedgerEntry> const& archiveEntries,
+    std::vector<LedgerKey> const& restoredEntries,
+    std::vector<LedgerKey> const& deletedEntries);
 
 uint32_t getAppLedgerVersion(Application& app);
 
@@ -24,27 +30,31 @@ uint32_t getAppLedgerVersion(std::shared_ptr<Application> app);
 void for_versions_with_differing_bucket_logic(
     Config const& cfg, std::function<void(Config const&)> const& f);
 
-struct EntryCounts
+template <class BucketT> struct EntryCounts
 {
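+    // Only the bucket kinds with a specialized constructor in
+    // BucketTestUtils.cpp may be instantiated.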
+    static_assert(std::is_same_v<BucketT, LiveBucket> ||
+                  std::is_same_v<BucketT, HotArchiveBucket> ||
+                  std::is_same_v<BucketT, ColdArchiveBucket>);
+
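+    // Counter names span bucket kinds: "InitOrArchived" covers INITENTRY and
+    // archived entries, "LiveOrHash" covers live entries and cold archive
+    // hash nodes.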
     size_t nMeta{0};
-    size_t nInit{0};
-    size_t nLive{0};
+    size_t nInitOrArchived{0};
+    size_t nLiveOrHash{0};
     size_t nDead{0};
     size_t
     sum() const
     {
-        return nLive + nInit + nDead;
+        return nLiveOrHash + nInitOrArchived + nDead;
     }
     size_t
     sumIncludingMeta() const
     {
-        return nLive + nInit + nDead + nMeta;
+        return nLiveOrHash + nInitOrArchived + nDead + nMeta;
     }
 
-    EntryCounts(std::shared_ptr<stellar::Bucket> bucket);
+    EntryCounts(std::shared_ptr<BucketT> bucket);
 };
 
-size_t countEntries(std::shared_ptr<stellar::Bucket> bucket);
+template <class BucketT> size_t countEntries(std::shared_ptr<BucketT> bucket);
 
 Hash closeLedger(Application& app, std::optional<SecretKey> skToSignValue,
                  xdr::xvector<UpgradeType, 6> upgrades = emptyUpgradeSteps);
diff --git a/src/bucket/test/BucketTests.cpp b/src/bucket/test/BucketTests.cpp
index 4da46e26a7..50bb406a3b 100644
--- a/src/bucket/test/BucketTests.cpp
+++ b/src/bucket/test/BucketTests.cpp
@@ -48,10 +48,10 @@ for_versions_with_differing_initentry_logic(
 {
     for_versions(
         {static_cast<uint32_t>(
-             Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) -
+             LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY) -
              1,
          static_cast<uint32_t>(
-             Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)},
+             LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY)},
         cfg, f);
 }
 
@@ -67,7 +67,7 @@ TEST_CASE_VERSIONS("file backed buckets", "[bucket][bucketbench]")
         auto dead = LedgerTestUtils::generateValidLedgerEntryKeysWithExclusions(
             {CONFIG_SETTING}, 1000);
         CLOG_DEBUG(Bucket, "Hashing entries");
-        std::shared_ptr<Bucket> b1 = Bucket::fresh(
+        std::shared_ptr<LiveBucket> b1 = LiveBucket::fresh(
             app->getBucketManager(), getAppLedgerVersion(app), {}, live, dead,
             /*countMergeEvents=*/true, clock.getIOContext(),
             /*doFsync=*/true);
@@ -83,13 +83,13 @@ TEST_CASE_VERSIONS("file backed buckets", "[bucket][bucketbench]")
                 b1 = Bucket::merge(
                     app->getBucketManager(),
                     app->getConfig().LEDGER_PROTOCOL_VERSION, b1,
-                    Bucket::fresh(app->getBucketManager(),
-                                  getAppLedgerVersion(app), {}, live, dead,
-                                  /*countMergeEvents=*/true,
-                                  clock.getIOContext(),
-                                  /*doFsync=*/true),
+                    LiveBucket::fresh(app->getBucketManager(),
+                                      getAppLedgerVersion(app), {}, live, dead,
+                                      /*countMergeEvents=*/true,
+                                      clock.getIOContext(),
+                                      /*doFsync=*/true),
                     /*shadows=*/{},
-                    /*keepDeadEntries=*/true,
+                    /*keepTombstoneEntries=*/true,
                     /*countMergeEvents=*/true, clock.getIOContext(),
                     /*doFsync=*/true);
             }
@@ -161,16 +161,16 @@ TEST_CASE_VERSIONS("merging bucket entries", "[bucket]")
                     abort();
                 }
                 auto deadEntry = LedgerEntryKey(liveEntry);
-                auto bLive = Bucket::fresh(bm, vers, {}, {liveEntry}, {},
-                                           /*countMergeEvents=*/true,
-                                           clock.getIOContext(),
-                                           /*doFsync=*/true);
-                auto bDead = Bucket::fresh(bm, vers, {}, {}, {deadEntry},
-                                           /*countMergeEvents=*/true,
-                                           clock.getIOContext(),
-                                           /*doFsync=*/true);
+                auto bLive = LiveBucket::fresh(bm, vers, {}, {liveEntry}, {},
+                                               /*countMergeEvents=*/true,
+                                               clock.getIOContext(),
+                                               /*doFsync=*/true);
+                auto bDead = LiveBucket::fresh(bm, vers, {}, {}, {deadEntry},
+                                               /*countMergeEvents=*/true,
+                                               clock.getIOContext(),
+                                               /*doFsync=*/true);
                 auto b1 = Bucket::merge(bm, vers, bLive, bDead, /*shadows=*/{},
-                                        /*keepDeadEntries=*/true,
+                                        /*keepTombstoneEntries=*/true,
                                         /*countMergeEvents=*/true,
                                         clock.getIOContext(),
                                         /*doFsync=*/true);
@@ -200,24 +200,24 @@ TEST_CASE_VERSIONS("merging bucket entries", "[bucket]")
                     dead.push_back(LedgerEntryKey(e));
                 }
             }
-            auto bLive =
-                Bucket::fresh(bm, vers, {}, live, {},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
-            auto bDead =
-                Bucket::fresh(bm, vers, {}, {}, dead,
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
+            auto bLive = LiveBucket::fresh(bm, vers, {}, live, {},
+                                           /*countMergeEvents=*/true,
+                                           clock.getIOContext(),
+                                           /*doFsync=*/true);
+            auto bDead = LiveBucket::fresh(bm, vers, {}, {}, dead,
+                                           /*countMergeEvents=*/true,
+                                           clock.getIOContext(),
+                                           /*doFsync=*/true);
             auto b1 =
                 Bucket::merge(bm, vers, bLive, bDead, /*shadows=*/{},
-                              /*keepDeadEntries=*/true,
+                              /*keepTombstoneEntries=*/true,
                               /*countMergeEvents=*/true, clock.getIOContext(),
                               /*doFsync=*/true);
             EntryCounts e(b1);
             CHECK(e.sum() == live.size());
-            CLOG_DEBUG(Bucket, "post-merge live count: {} of {}", e.nLive,
+            CLOG_DEBUG(Bucket, "post-merge live count: {} of {}", e.nLiveOrHash,
                        live.size());
-            CHECK(e.nLive == live.size() - dead.size());
+            CHECK(e.nLiveOrHash == live.size() - dead.size());
         }
 
         SECTION("random live entries overwrite live entries in any order")
@@ -226,7 +226,7 @@ TEST_CASE_VERSIONS("merging bucket entries", "[bucket]")
                 LedgerTestUtils::generateValidUniqueLedgerEntriesWithExclusions(
                     {CONFIG_SETTING}, 100);
             std::vector<LedgerKey> dead;
-            std::shared_ptr<Bucket> b1 = Bucket::fresh(
+            std::shared_ptr<LiveBucket> b1 = LiveBucket::fresh(
                 app->getBucketManager(), getAppLedgerVersion(app), {}, live,
                 dead, /*countMergeEvents=*/true, clock.getIOContext(),
                 /*doFsync=*/true);
@@ -258,14 +258,14 @@ TEST_CASE_VERSIONS("merging bucket entries", "[bucket]")
                     ++liveCount;
                 }
             }
-            std::shared_ptr<Bucket> b2 = Bucket::fresh(
+            std::shared_ptr<LiveBucket> b2 = LiveBucket::fresh(
                 app->getBucketManager(), getAppLedgerVersion(app), {}, live,
                 dead, /*countMergeEvents=*/true, clock.getIOContext(),
                 /*doFsync=*/true);
-            std::shared_ptr<Bucket> b3 =
+            std::shared_ptr<LiveBucket> b3 =
                 Bucket::merge(app->getBucketManager(),
                               app->getConfig().LEDGER_PROTOCOL_VERSION, b1, b2,
-                              /*shadows=*/{}, /*keepDeadEntries=*/true,
+                              /*shadows=*/{}, /*keepTombstoneEntries=*/true,
                               /*countMergeEvents=*/true, clock.getIOContext(),
                               /*doFsync=*/true);
             CHECK(countEntries(b3) == liveCount);
@@ -273,6 +273,99 @@ TEST_CASE_VERSIONS("merging bucket entries", "[bucket]")
     });
 }
 
+TEST_CASE_VERSIONS("merging hot archive bucket entries", "[bucket][archival]")
+{
+    VirtualClock clock;
+    Config const& cfg = getTestConfig();
+
+    auto app = createTestApplication(clock, cfg);
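+    // Hot archive merge semantics are only exercised from protocol 23 on.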
+    for_versions_from(23, *app, [&] {
+        auto& bm = app->getBucketManager();
+        auto vers = getAppLedgerVersion(app);
+
+        SECTION("new annihilates old")
+        {
+            auto e1 =
+                LedgerTestUtils::generateValidLedgerEntryOfType(CONTRACT_CODE);
+            auto e2 =
+                LedgerTestUtils::generateValidLedgerEntryOfType(CONTRACT_CODE);
+            auto e3 =
+                LedgerTestUtils::generateValidLedgerEntryOfType(CONTRACT_DATA);
+            auto e4 =
+                LedgerTestUtils::generateValidLedgerEntryOfType(CONTRACT_DATA);
+
+            // Old bucket:
+            // e1 -> ARCHIVED
+            // e2 -> LIVE
+            // e3 -> DELETED
+            // e4 -> DELETED
+            auto b1 = HotArchiveBucket::fresh(
+                bm, vers, {e1}, {LedgerEntryKey(e2)},
+                {LedgerEntryKey(e3), LedgerEntryKey(e4)},
+                /*countMergeEvents=*/true, clock.getIOContext(),
+                /*doFsync=*/true);
+
+            // New bucket:
+            // e1 -> DELETED
+            // e2 -> ARCHIVED
+            // e3 -> LIVE
+            auto b2 = HotArchiveBucket::fresh(
+                bm, vers, {e2}, {LedgerEntryKey(e3)}, {LedgerEntryKey(e1)},
+                /*countMergeEvents=*/true, clock.getIOContext(),
+                /*doFsync=*/true);
+
+            // Expected result:
+            // e1 -> DELETED
+            // e2 -> ARCHIVED
+            // e3 -> LIVE
+            // e4 -> DELETED
+            auto merged =
+                Bucket::merge(bm, vers, b1, b2, /*shadows=*/{},
+                              /*keepTombstoneEntries=*/true,
+                              /*countMergeEvents=*/true, clock.getIOContext(),
+                              /*doFsync=*/true);
+
+            bool seen1 = false;
+            bool seen4 = false;
+            auto count = 0;
+            for (HotArchiveBucketInputIterator iter(merged); iter; ++iter)
+            {
+                ++count;
+                auto const& e = *iter;
+                if (e.type() == HOT_ARCHIVE_ARCHIVED)
+                {
+                    REQUIRE(e.archivedEntry() == e2);
+                }
+                else if (e.type() == HOT_ARCHIVE_LIVE)
+                {
+                    REQUIRE(e.key() == LedgerEntryKey(e3));
+                }
+                else if (e.type() == HOT_ARCHIVE_DELETED)
+                {
+                    if (e.key() == LedgerEntryKey(e1))
+                    {
+                        REQUIRE(!seen1);
+                        seen1 = true;
+                    }
+                    else if (e.key() == LedgerEntryKey(e4))
+                    {
+                        REQUIRE(!seen4);
+                        seen4 = true;
+                    }
+                }
+                else
+                {
+                    FAIL();
+                }
+            }
+
+            REQUIRE(seen1);
+            REQUIRE(seen4);
+            REQUIRE(count == 4);
+        }
+    });
+}
+
 static LedgerEntry
 generateAccount()
 {
@@ -330,7 +423,8 @@ TEST_CASE("merges proceed old-style despite newer shadows",
     Config const& cfg = getTestConfig();
     Application::pointer app = createTestApplication(clock, cfg);
     auto& bm = app->getBucketManager();
-    auto v12 = static_cast<uint32_t>(Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED);
+    auto v12 =
+        static_cast<uint32_t>(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED);
     auto v11 = v12 - 1;
     auto v10 = v11 - 1;
 
@@ -338,31 +432,31 @@ TEST_CASE("merges proceed old-style despite newer shadows",
     LedgerEntry otherLiveA = generateDifferentAccount({liveEntry});
 
     auto b10first =
-        Bucket::fresh(bm, v10, {}, {liveEntry}, {},
-                      /*countMergeEvents=*/true, clock.getIOContext(),
-                      /*doFsync=*/true);
+        LiveBucket::fresh(bm, v10, {}, {liveEntry}, {},
+                          /*countMergeEvents=*/true, clock.getIOContext(),
+                          /*doFsync=*/true);
     auto b10second =
-        Bucket::fresh(bm, v10, {}, {otherLiveA}, {},
-                      /*countMergeEvents=*/true, clock.getIOContext(),
-                      /*doFsync=*/true);
+        LiveBucket::fresh(bm, v10, {}, {otherLiveA}, {},
+                          /*countMergeEvents=*/true, clock.getIOContext(),
+                          /*doFsync=*/true);
 
     auto b11first =
-        Bucket::fresh(bm, v11, {}, {liveEntry}, {},
-                      /*countMergeEvents=*/true, clock.getIOContext(),
-                      /*doFsync=*/true);
+        LiveBucket::fresh(bm, v11, {}, {liveEntry}, {},
+                          /*countMergeEvents=*/true, clock.getIOContext(),
+                          /*doFsync=*/true);
     auto b11second =
-        Bucket::fresh(bm, v11, {}, {otherLiveA}, {},
-                      /*countMergeEvents=*/true, clock.getIOContext(),
-                      /*doFsync=*/true);
+        LiveBucket::fresh(bm, v11, {}, {otherLiveA}, {},
+                          /*countMergeEvents=*/true, clock.getIOContext(),
+                          /*doFsync=*/true);
 
     auto b12first =
-        Bucket::fresh(bm, v12, {}, {liveEntry}, {}, /*countMergeEvents=*/true,
-                      clock.getIOContext(),
-                      /*doFsync=*/true);
+        LiveBucket::fresh(bm, v12, {}, {liveEntry}, {},
+                          /*countMergeEvents=*/true, clock.getIOContext(),
+                          /*doFsync=*/true);
     auto b12second =
-        Bucket::fresh(bm, v12, {}, {otherLiveA}, {},
-                      /*countMergeEvents=*/true, clock.getIOContext(),
-                      /*doFsync=*/true);
+        LiveBucket::fresh(bm, v12, {}, {otherLiveA}, {},
+                          /*countMergeEvents=*/true, clock.getIOContext(),
+                          /*doFsync=*/true);
 
     SECTION("shadow version 12")
     {
@@ -370,10 +464,10 @@ TEST_CASE("merges proceed old-style despite newer shadows",
         auto bucket =
             Bucket::merge(bm, v12, b11first, b11second,
                           /*shadows=*/{b12first},
-                          /*keepDeadEntries=*/true,
+                          /*keepTombstoneEntries=*/true,
                           /*countMergeEvents=*/true, clock.getIOContext(),
                           /*doFsync=*/true);
-        REQUIRE(Bucket::getBucketVersion(bucket) == v11);
+        REQUIRE(bucket->getBucketVersion() == v11);
     }
     SECTION("shadow versions mixed, pick lower")
     {
@@ -382,16 +476,16 @@ TEST_CASE("merges proceed old-style despite newer shadows",
         auto bucket =
             Bucket::merge(bm, v12, b10first, b10second,
                           /*shadows=*/{b12first, b11second},
-                          /*keepDeadEntries=*/true,
+                          /*keepTombstoneEntries=*/true,
                           /*countMergeEvents=*/true, clock.getIOContext(),
                           /*doFsync=*/true);
-        REQUIRE(Bucket::getBucketVersion(bucket) == v11);
+        REQUIRE(bucket->getBucketVersion() == v11);
     }
     SECTION("refuse to merge new version with shadow")
     {
         REQUIRE_THROWS_AS(Bucket::merge(bm, v12, b12first, b12second,
                                         /*shadows=*/{b12first},
-                                        /*keepDeadEntries=*/true,
+                                        /*keepTombstoneEntries=*/true,
                                         /*countMergeEvents=*/true,
                                         clock.getIOContext(),
                                         /*doFsync=*/true),
@@ -409,21 +503,25 @@ TEST_CASE("merges refuse to exceed max protocol version",
     auto vers = getAppLedgerVersion(app);
     LedgerEntry liveEntry = generateAccount();
     LedgerEntry otherLiveA = generateDifferentAccount({liveEntry});
-    auto bold1 = Bucket::fresh(bm, vers - 1, {}, {liveEntry}, {},
-                               /*countMergeEvents=*/true, clock.getIOContext(),
-                               /*doFsync=*/true);
-    auto bold2 = Bucket::fresh(bm, vers - 1, {}, {otherLiveA}, {},
-                               /*countMergeEvents=*/true, clock.getIOContext(),
-                               /*doFsync=*/true);
-    auto bnew1 = Bucket::fresh(bm, vers, {}, {liveEntry}, {},
-                               /*countMergeEvents=*/true, clock.getIOContext(),
-                               /*doFsync=*/true);
-    auto bnew2 = Bucket::fresh(bm, vers, {}, {otherLiveA}, {},
-                               /*countMergeEvents=*/true, clock.getIOContext(),
-                               /*doFsync=*/true);
+    auto bold1 =
+        LiveBucket::fresh(bm, vers - 1, {}, {liveEntry}, {},
+                          /*countMergeEvents=*/true, clock.getIOContext(),
+                          /*doFsync=*/true);
+    auto bold2 =
+        LiveBucket::fresh(bm, vers - 1, {}, {otherLiveA}, {},
+                          /*countMergeEvents=*/true, clock.getIOContext(),
+                          /*doFsync=*/true);
+    auto bnew1 =
+        LiveBucket::fresh(bm, vers, {}, {liveEntry}, {},
+                          /*countMergeEvents=*/true, clock.getIOContext(),
+                          /*doFsync=*/true);
+    auto bnew2 =
+        LiveBucket::fresh(bm, vers, {}, {otherLiveA}, {},
+                          /*countMergeEvents=*/true, clock.getIOContext(),
+                          /*doFsync=*/true);
     REQUIRE_THROWS_AS(Bucket::merge(bm, vers - 1, bnew1, bnew2,
                                     /*shadows=*/{},
-                                    /*keepDeadEntries=*/true,
+                                    /*keepTombstoneEntries=*/true,
                                     /*countMergeEvents=*/true,
                                     clock.getIOContext(),
                                     /*doFsync=*/true),
@@ -436,7 +534,7 @@ TEST_CASE("bucket output iterator rejects wrong-version entries",
     VirtualClock clock;
     Config const& cfg = getTestConfig();
     auto vers_new = static_cast<uint32_t>(
-        Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY);
+        LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY);
     BucketMetadata meta;
     meta.ledgerVersion = vers_new - 1;
     Application::pointer app = createTestApplication(clock, cfg);
@@ -447,8 +545,8 @@ TEST_CASE("bucket output iterator rejects wrong-version entries",
     metaEntry.type(METAENTRY);
     metaEntry.metaEntry() = meta;
     MergeCounters mc;
-    BucketOutputIterator out(bm.getTmpDir(), true, meta, mc,
-                             clock.getIOContext(), /*doFsync=*/true);
+    LiveBucketOutputIterator out(bm.getTmpDir(), true, meta, mc,
+                                 clock.getIOContext(), /*doFsync=*/true);
     REQUIRE_THROWS_AS(out.put(initEntry), std::runtime_error);
     REQUIRE_THROWS_AS(out.put(metaEntry), std::runtime_error);
 }
@@ -466,7 +564,8 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry",
 
         // Whether we're in the era of supporting or not-supporting INITENTRY.
         bool initEra = protocolVersionStartsFrom(
-            vers, Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY);
+            vers,
+            LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY);
 
         CLOG_INFO(Bucket, "=== finished buckets for initial account == ");
 
@@ -488,17 +587,17 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry",
 
         SECTION("dead and init account entries merge correctly")
         {
-            auto bInit =
-                Bucket::fresh(bm, vers, {initEntry}, {}, {},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
-            auto bDead =
-                Bucket::fresh(bm, vers, {}, {}, {deadEntry},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
+            auto bInit = LiveBucket::fresh(bm, vers, {initEntry}, {}, {},
+                                           /*countMergeEvents=*/true,
+                                           clock.getIOContext(),
+                                           /*doFsync=*/true);
+            auto bDead = LiveBucket::fresh(bm, vers, {}, {}, {deadEntry},
+                                           /*countMergeEvents=*/true,
+                                           clock.getIOContext(),
+                                           /*doFsync=*/true);
             auto b1 = Bucket::merge(
                 bm, cfg.LEDGER_PROTOCOL_VERSION, bInit, bDead, /*shadows=*/{},
-                /*keepDeadEntries=*/true,
+                /*keepTombstoneEntries=*/true,
                 /*countMergeEvents=*/true, clock.getIOContext(),
                 /*doFsync=*/true);
             // In initEra, the INIT will make it through fresh() to the bucket,
@@ -507,8 +606,8 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry",
             // fresh(), and that will be killed by the DEAD, leaving 1
             // (tombstone) entry.
             EntryCounts e(b1);
-            CHECK(e.nInit == 0);
-            CHECK(e.nLive == 0);
+            CHECK(e.nInitOrArchived == 0);
+            CHECK(e.nLiveOrHash == 0);
             if (initEra)
             {
                 CHECK(e.nMeta == 1);
@@ -524,33 +623,33 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry",
         SECTION("dead and init entries merge with intervening live entries "
                 "correctly")
         {
-            auto bInit =
-                Bucket::fresh(bm, vers, {initEntry}, {}, {},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
-            auto bLive =
-                Bucket::fresh(bm, vers, {}, {liveEntry}, {},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
-            auto bDead =
-                Bucket::fresh(bm, vers, {}, {}, {deadEntry},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
+            auto bInit = LiveBucket::fresh(bm, vers, {initEntry}, {}, {},
+                                           /*countMergeEvents=*/true,
+                                           clock.getIOContext(),
+                                           /*doFsync=*/true);
+            auto bLive = LiveBucket::fresh(bm, vers, {}, {liveEntry}, {},
+                                           /*countMergeEvents=*/true,
+                                           clock.getIOContext(),
+                                           /*doFsync=*/true);
+            auto bDead = LiveBucket::fresh(bm, vers, {}, {}, {deadEntry},
+                                           /*countMergeEvents=*/true,
+                                           clock.getIOContext(),
+                                           /*doFsync=*/true);
             auto bmerge1 = Bucket::merge(
                 bm, cfg.LEDGER_PROTOCOL_VERSION, bInit, bLive, /*shadows=*/{},
-                /*keepDeadEntries=*/true,
+                /*keepTombstoneEntries=*/true,
                 /*countMergeEvents=*/true, clock.getIOContext(),
                 /*doFsync=*/true);
             auto b1 = Bucket::merge(
                 bm, cfg.LEDGER_PROTOCOL_VERSION, bmerge1, bDead, /*shadows=*/{},
-                /*keepDeadEntries=*/true,
+                /*keepTombstoneEntries=*/true,
                 /*countMergeEvents=*/true, clock.getIOContext(),
                 /*doFsync=*/true);
             // The same thing should happen here as above, except that the INIT
             // will merge-over the LIVE during fresh().
             EntryCounts e(b1);
-            CHECK(e.nInit == 0);
-            CHECK(e.nLive == 0);
+            CHECK(e.nInitOrArchived == 0);
+            CHECK(e.nLiveOrHash == 0);
             if (initEra)
             {
                 CHECK(e.nMeta == 1);
@@ -566,62 +665,62 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry",
         SECTION("dead and init entries annihilate multiple live entries via "
                 "separate buckets")
         {
-            auto bold =
-                Bucket::fresh(bm, vers, {initEntry}, {}, {},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
-            auto bmed = Bucket::fresh(
+            auto bold = LiveBucket::fresh(bm, vers, {initEntry}, {}, {},
+                                          /*countMergeEvents=*/true,
+                                          clock.getIOContext(),
+                                          /*doFsync=*/true);
+            auto bmed = LiveBucket::fresh(
                 bm, vers, {}, {otherLiveA, otherLiveB, liveEntry, otherLiveC},
                 {}, /*countMergeEvents=*/true, clock.getIOContext(),
                 /*doFsync=*/true);
-            auto bnew =
-                Bucket::fresh(bm, vers, {}, {}, {deadEntry},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
+            auto bnew = LiveBucket::fresh(bm, vers, {}, {}, {deadEntry},
+                                          /*countMergeEvents=*/true,
+                                          clock.getIOContext(),
+                                          /*doFsync=*/true);
             EntryCounts eold(bold), emed(bmed), enew(bnew);
             if (initEra)
             {
                 CHECK(eold.nMeta == 1);
                 CHECK(emed.nMeta == 1);
                 CHECK(enew.nMeta == 1);
-                CHECK(eold.nInit == 1);
-                CHECK(eold.nLive == 0);
+                CHECK(eold.nInitOrArchived == 1);
+                CHECK(eold.nLiveOrHash == 0);
             }
             else
             {
                 CHECK(eold.nMeta == 0);
                 CHECK(emed.nMeta == 0);
                 CHECK(enew.nMeta == 0);
-                CHECK(eold.nInit == 0);
-                CHECK(eold.nLive == 1);
+                CHECK(eold.nInitOrArchived == 0);
+                CHECK(eold.nLiveOrHash == 1);
             }
 
             CHECK(eold.nDead == 0);
 
-            CHECK(emed.nInit == 0);
-            CHECK(emed.nLive == 4);
+            CHECK(emed.nInitOrArchived == 0);
+            CHECK(emed.nLiveOrHash == 4);
             CHECK(emed.nDead == 0);
 
-            CHECK(enew.nInit == 0);
-            CHECK(enew.nLive == 0);
+            CHECK(enew.nInitOrArchived == 0);
+            CHECK(enew.nLiveOrHash == 0);
             CHECK(enew.nDead == 1);
 
             auto bmerge1 = Bucket::merge(
                 bm, cfg.LEDGER_PROTOCOL_VERSION, bold, bmed, /*shadows=*/{},
-                /*keepDeadEntries=*/true,
+                /*keepTombstoneEntries=*/true,
                 /*countMergeEvents=*/true, clock.getIOContext(),
                 /*doFsync=*/true);
             auto bmerge2 = Bucket::merge(
                 bm, cfg.LEDGER_PROTOCOL_VERSION, bmerge1, bnew, /*shadows=*/{},
-                /*keepDeadEntries=*/true,
+                /*keepTombstoneEntries=*/true,
                 /*countMergeEvents=*/true, clock.getIOContext(),
                 /*doFsync=*/true);
             EntryCounts emerge1(bmerge1), emerge2(bmerge2);
             if (initEra)
             {
                 CHECK(emerge1.nMeta == 1);
-                CHECK(emerge1.nInit == 1);
-                CHECK(emerge1.nLive == 3);
+                CHECK(emerge1.nInitOrArchived == 1);
+                CHECK(emerge1.nLiveOrHash == 3);
 
                 CHECK(emerge2.nMeta == 1);
                 CHECK(emerge2.nDead == 0);
@@ -629,15 +728,15 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry",
             else
             {
                 CHECK(emerge1.nMeta == 0);
-                CHECK(emerge1.nInit == 0);
-                CHECK(emerge1.nLive == 4);
+                CHECK(emerge1.nInitOrArchived == 0);
+                CHECK(emerge1.nLiveOrHash == 4);
 
                 CHECK(emerge2.nMeta == 0);
                 CHECK(emerge2.nDead == 1);
             }
             CHECK(emerge1.nDead == 0);
-            CHECK(emerge2.nInit == 0);
-            CHECK(emerge2.nLive == 3);
+            CHECK(emerge2.nInitOrArchived == 0);
+            CHECK(emerge2.nLiveOrHash == 3);
         }
     });
 }
@@ -655,7 +754,8 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
 
         // Whether we're in the era of supporting or not-supporting INITENTRY.
         bool initEra = protocolVersionStartsFrom(
-            vers, Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY);
+            vers,
+            LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY);
 
         CLOG_INFO(Bucket, "=== finished buckets for initial account == ");
 
@@ -680,37 +780,37 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
             // In pre-11 versions, shadows _do_ eliminate lifecycle entries
             // (INIT/DEAD). In 11-and-after versions, shadows _don't_ eliminate
             // lifecycle entries.
-            auto shadow =
-                Bucket::fresh(bm, vers, {}, {liveEntry}, {},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
-            auto b1 =
-                Bucket::fresh(bm, vers, {initEntry}, {}, {},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
-            auto b2 =
-                Bucket::fresh(bm, vers, {otherInitA}, {}, {},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
+            auto shadow = LiveBucket::fresh(bm, vers, {}, {liveEntry}, {},
+                                            /*countMergeEvents=*/true,
+                                            clock.getIOContext(),
+                                            /*doFsync=*/true);
+            auto b1 = LiveBucket::fresh(bm, vers, {initEntry}, {}, {},
+                                        /*countMergeEvents=*/true,
+                                        clock.getIOContext(),
+                                        /*doFsync=*/true);
+            auto b2 = LiveBucket::fresh(bm, vers, {otherInitA}, {}, {},
+                                        /*countMergeEvents=*/true,
+                                        clock.getIOContext(),
+                                        /*doFsync=*/true);
             auto merged =
                 Bucket::merge(bm, cfg.LEDGER_PROTOCOL_VERSION, b1, b2,
                               /*shadows=*/{shadow},
-                              /*keepDeadEntries=*/true,
+                              /*keepTombstoneEntries=*/true,
                               /*countMergeEvents=*/true, clock.getIOContext(),
                               /*doFsync=*/true);
             EntryCounts e(merged);
             if (initEra)
             {
                 CHECK(e.nMeta == 1);
-                CHECK(e.nInit == 2);
-                CHECK(e.nLive == 0);
+                CHECK(e.nInitOrArchived == 2);
+                CHECK(e.nLiveOrHash == 0);
                 CHECK(e.nDead == 0);
             }
             else
             {
                 CHECK(e.nMeta == 0);
-                CHECK(e.nInit == 0);
-                CHECK(e.nLive == 1);
+                CHECK(e.nInitOrArchived == 0);
+                CHECK(e.nLiveOrHash == 1);
                 CHECK(e.nDead == 0);
             }
         }
@@ -722,26 +822,26 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
             // INIT. See comment in `maybePut` in Bucket.cpp.
             //
             // (level1 is newest here, level5 is oldest)
-            auto level1 =
-                Bucket::fresh(bm, vers, {}, {}, {deadEntry},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
-            auto level2 =
-                Bucket::fresh(bm, vers, {initEntry2}, {}, {},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
-            auto level3 =
-                Bucket::fresh(bm, vers, {}, {}, {deadEntry},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
-            auto level4 =
-                Bucket::fresh(bm, vers, {}, {}, {}, /*countMergeEvents=*/true,
-                              clock.getIOContext(),
-                              /*doFsync=*/true);
-            auto level5 =
-                Bucket::fresh(bm, vers, {initEntry}, {}, {},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
+            auto level1 = LiveBucket::fresh(bm, vers, {}, {}, {deadEntry},
+                                            /*countMergeEvents=*/true,
+                                            clock.getIOContext(),
+                                            /*doFsync=*/true);
+            auto level2 = LiveBucket::fresh(bm, vers, {initEntry2}, {}, {},
+                                            /*countMergeEvents=*/true,
+                                            clock.getIOContext(),
+                                            /*doFsync=*/true);
+            auto level3 = LiveBucket::fresh(bm, vers, {}, {}, {deadEntry},
+                                            /*countMergeEvents=*/true,
+                                            clock.getIOContext(),
+                                            /*doFsync=*/true);
+            auto level4 = LiveBucket::fresh(bm, vers, {}, {}, {},
+                                            /*countMergeEvents=*/true,
+                                            clock.getIOContext(),
+                                            /*doFsync=*/true);
+            auto level5 = LiveBucket::fresh(bm, vers, {initEntry}, {}, {},
+                                            /*countMergeEvents=*/true,
+                                            clock.getIOContext(),
+                                            /*doFsync=*/true);
 
             // Do a merge between levels 4 and 3, with shadows from 2 and 1,
             // risking shadowing-out level 3. Level 4 is a placeholder here,
@@ -750,7 +850,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
             auto merge43 =
                 Bucket::merge(bm, cfg.LEDGER_PROTOCOL_VERSION, level4, level3,
                               /*shadows=*/{level2, level1},
-                              /*keepDeadEntries=*/true,
+                              /*keepTombstoneEntries=*/true,
                               /*countMergeEvents=*/true, clock.getIOContext(),
                               /*doFsync=*/true);
             EntryCounts e43(merge43);
@@ -758,16 +858,16 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
             {
                 // New-style, we preserve the dead entry.
                 CHECK(e43.nMeta == 1);
-                CHECK(e43.nInit == 0);
-                CHECK(e43.nLive == 0);
+                CHECK(e43.nInitOrArchived == 0);
+                CHECK(e43.nLiveOrHash == 0);
                 CHECK(e43.nDead == 1);
             }
             else
             {
                 // Old-style, we shadowed-out the dead entry.
                 CHECK(e43.nMeta == 0);
-                CHECK(e43.nInit == 0);
-                CHECK(e43.nLive == 0);
+                CHECK(e43.nInitOrArchived == 0);
+                CHECK(e43.nLiveOrHash == 0);
                 CHECK(e43.nDead == 0);
             }
 
@@ -776,7 +876,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
             auto merge21 =
                 Bucket::merge(bm, cfg.LEDGER_PROTOCOL_VERSION, level2, level1,
                               /*shadows=*/{},
-                              /*keepDeadEntries=*/true,
+                              /*keepTombstoneEntries=*/true,
                               /*countMergeEvents=*/true, clock.getIOContext(),
                               /*doFsync=*/true);
             EntryCounts e21(merge21);
@@ -784,16 +884,16 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
             {
                 // New-style, they mutually annihilate.
                 CHECK(e21.nMeta == 1);
-                CHECK(e21.nInit == 0);
-                CHECK(e21.nLive == 0);
+                CHECK(e21.nInitOrArchived == 0);
+                CHECK(e21.nLiveOrHash == 0);
                 CHECK(e21.nDead == 0);
             }
             else
             {
                 // Old-style, we keep the tombstone around.
                 CHECK(e21.nMeta == 0);
-                CHECK(e21.nInit == 0);
-                CHECK(e21.nLive == 0);
+                CHECK(e21.nInitOrArchived == 0);
+                CHECK(e21.nLiveOrHash == 0);
                 CHECK(e21.nDead == 1);
             }
 
@@ -802,13 +902,13 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
             auto merge4321 =
                 Bucket::merge(bm, cfg.LEDGER_PROTOCOL_VERSION, merge43, merge21,
                               /*shadows=*/{},
-                              /*keepDeadEntries=*/true,
+                              /*keepTombstoneEntries=*/true,
                               /*countMergeEvents=*/true, clock.getIOContext(),
                               /*doFsync=*/true);
             auto merge54321 = Bucket::merge(
                 bm, cfg.LEDGER_PROTOCOL_VERSION, level5, merge4321,
                 /*shadows=*/{},
-                /*keepDeadEntries=*/true,
+                /*keepTombstoneEntries=*/true,
                 /*countMergeEvents=*/true, clock.getIOContext(),
                 /*doFsync=*/true);
-            EntryCounts e54321(merge21);
+            EntryCounts e54321(merge54321);
@@ -816,16 +916,16 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
             {
                 // New-style, we should get a second mutual annihilation.
                 CHECK(e54321.nMeta == 1);
-                CHECK(e54321.nInit == 0);
-                CHECK(e54321.nLive == 0);
+                CHECK(e54321.nInitOrArchived == 0);
+                CHECK(e54321.nLiveOrHash == 0);
                 CHECK(e54321.nDead == 0);
             }
             else
             {
                 // Old-style, the tombstone should clobber the live entry.
                 CHECK(e54321.nMeta == 0);
-                CHECK(e54321.nInit == 0);
-                CHECK(e54321.nLive == 0);
+                CHECK(e54321.nInitOrArchived == 0);
+                CHECK(e54321.nLiveOrHash == 0);
                 CHECK(e54321.nDead == 1);
             }
         }
@@ -839,18 +939,18 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
             // `maybePut` in Bucket.cpp.
             //
             // (level1 is newest here, level3 is oldest)
-            auto level1 =
-                Bucket::fresh(bm, vers, {}, {}, {deadEntry},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
-            auto level2 =
-                Bucket::fresh(bm, vers, {}, {liveEntry}, {},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
-            auto level3 =
-                Bucket::fresh(bm, vers, {initEntry}, {}, {},
-                              /*countMergeEvents=*/true, clock.getIOContext(),
-                              /*doFsync=*/true);
+            auto level1 = LiveBucket::fresh(bm, vers, {}, {}, {deadEntry},
+                                            /*countMergeEvents=*/true,
+                                            clock.getIOContext(),
+                                            /*doFsync=*/true);
+            auto level2 = LiveBucket::fresh(bm, vers, {}, {liveEntry}, {},
+                                            /*countMergeEvents=*/true,
+                                            clock.getIOContext(),
+                                            /*doFsync=*/true);
+            auto level3 = LiveBucket::fresh(bm, vers, {initEntry}, {}, {},
+                                            /*countMergeEvents=*/true,
+                                            clock.getIOContext(),
+                                            /*doFsync=*/true);
 
             // Do a merge between levels 3 and 2, with shadow from 1, risking
             // shadowing-out the init on level 3. Level 2 is a placeholder here,
@@ -859,7 +959,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
             auto merge32 =
                 Bucket::merge(bm, cfg.LEDGER_PROTOCOL_VERSION, level3, level2,
                               /*shadows=*/{level1},
-                              /*keepDeadEntries=*/true,
+                              /*keepTombstoneEntries=*/true,
                               /*countMergeEvents=*/true, clock.getIOContext(),
                               /*doFsync=*/true);
             EntryCounts e32(merge32);
@@ -867,16 +967,16 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
             {
                 // New-style, we preserve the init entry.
                 CHECK(e32.nMeta == 1);
-                CHECK(e32.nInit == 1);
-                CHECK(e32.nLive == 0);
+                CHECK(e32.nInitOrArchived == 1);
+                CHECK(e32.nLiveOrHash == 0);
                 CHECK(e32.nDead == 0);
             }
             else
             {
                 // Old-style, we shadowed-out the live and init entries.
                 CHECK(e32.nMeta == 0);
-                CHECK(e32.nInit == 0);
-                CHECK(e32.nLive == 0);
+                CHECK(e32.nInitOrArchived == 0);
+                CHECK(e32.nLiveOrHash == 0);
                 CHECK(e32.nDead == 0);
             }
 
@@ -886,7 +986,7 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
             auto merge321 =
                 Bucket::merge(bm, cfg.LEDGER_PROTOCOL_VERSION, merge32, level1,
                               /*shadows=*/{},
-                              /*keepDeadEntries=*/true,
+                              /*keepTombstoneEntries=*/true,
                               /*countMergeEvents=*/true, clock.getIOContext(),
                               /*doFsync=*/true);
             EntryCounts e321(merge321);
@@ -894,8 +994,8 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
             {
                 // New-style, init meets dead and they annihilate.
                 CHECK(e321.nMeta == 1);
-                CHECK(e321.nInit == 0);
-                CHECK(e321.nLive == 0);
+                CHECK(e321.nInitOrArchived == 0);
+                CHECK(e321.nLiveOrHash == 0);
                 CHECK(e321.nDead == 0);
             }
             else
@@ -903,59 +1003,14 @@ TEST_CASE_VERSIONS("merging bucket entries with initentry with shadows",
                 // Old-style, init was already shadowed-out, so dead
                 // accumulates.
                 CHECK(e321.nMeta == 0);
-                CHECK(e321.nInit == 0);
-                CHECK(e321.nLive == 0);
+                CHECK(e321.nInitOrArchived == 0);
+                CHECK(e321.nLiveOrHash == 0);
                 CHECK(e321.nDead == 1);
             }
         }
     });
 }
 
-TEST_CASE_VERSIONS("legacy bucket apply", "[bucket]")
-{
-    VirtualClock clock;
-    Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
-    for_versions_with_differing_bucket_logic(cfg, [&](Config const& cfg) {
-        Application::pointer app = createTestApplication(clock, cfg);
-
-        std::vector<LedgerEntry> live(10), noLive;
-        std::vector<LedgerKey> dead, noDead;
-
-        for (auto& e : live)
-        {
-            e.data.type(ACCOUNT);
-            auto& a = e.data.account();
-            a = LedgerTestUtils::generateValidAccountEntry(5);
-            a.balance = 1000000000;
-            dead.emplace_back(LedgerEntryKey(e));
-        }
-
-        std::shared_ptr<Bucket> birth = Bucket::fresh(
-            app->getBucketManager(), getAppLedgerVersion(app), {}, live, noDead,
-            /*countMergeEvents=*/true, clock.getIOContext(),
-            /*doFsync=*/true);
-
-        std::shared_ptr<Bucket> death = Bucket::fresh(
-            app->getBucketManager(), getAppLedgerVersion(app), {}, noLive, dead,
-            /*countMergeEvents=*/true, clock.getIOContext(),
-            /*doFsync=*/true);
-
-        CLOG_INFO(Bucket, "Applying bucket with {} live entries", live.size());
-        birth->apply(*app);
-        {
-            auto count = app->getLedgerTxnRoot().countObjects(ACCOUNT);
-            REQUIRE(count == live.size() + 1 /* root account */);
-        }
-
-        CLOG_INFO(Bucket, "Applying bucket with {} dead entries", dead.size());
-        death->apply(*app);
-        {
-            auto count = app->getLedgerTxnRoot().countObjects(ACCOUNT);
-            REQUIRE(count == 1 /* root account */);
-        }
-    });
-}
-
 TEST_CASE("bucket apply bench", "[bucketbench][!hide]")
 {
     auto runtest = [](Config::TestDbMode mode) {
@@ -973,7 +1028,7 @@ TEST_CASE("bucket apply bench", "[bucketbench][!hide]")
             a = LedgerTestUtils::generateValidAccountEntry(5);
         }
 
-        std::shared_ptr<Bucket> birth = Bucket::fresh(
+        std::shared_ptr<LiveBucket> birth = LiveBucket::fresh(
             app->getBucketManager(), getAppLedgerVersion(app), {}, live, noDead,
             /*countMergeEvents=*/true, clock.getIOContext(),
             /*doFsync=*/true);
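
The renamed counters checked above (`nInitOrArchived`, `nLiveOrHash`) still exercise the same INITENTRY semantics: in new-style merges, an INITENTRY that meets a DEADENTRY for the same key annihilates (both drop out), while old-style merges let the tombstone accumulate. Below is a minimal sketch of just the annihilation rule, with hypothetical types (`Kind`, `mergeNewerOverOlder`) standing in for the real bucket merge, which handles several more cases:

```cpp
#include <map>
#include <string>

enum class Kind
{
    Init,
    Live,
    Dead
};

// Merge a newer snapshot over an older one. Only annihilation and plain
// shadowing are modeled; the other protocol-specific INITENTRY cases are
// deliberately omitted.
static std::map<std::string, Kind>
mergeNewerOverOlder(std::map<std::string, Kind> older,
                    std::map<std::string, Kind> const& newer)
{
    for (auto const& [key, kind] : newer)
    {
        auto it = older.find(key);
        if (it != older.end() && it->second == Kind::Init &&
            kind == Kind::Dead)
        {
            // The entry was created and deleted within the merged window:
            // drop both sides, leaving no INIT, LIVE, or DEAD entry behind.
            older.erase(it);
        }
        else
        {
            older[key] = kind; // the newer entry shadows the older one
        }
    }
    return older;
}
```

Merging `{{"k", Kind::Init}}` with `{{"k", Kind::Dead}}` returns an empty map, which is the shape the `nInitOrArchived == 0 && nDead == 0` checks above assert for new-style merges.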
diff --git a/src/catchup/ApplyBucketsWork.cpp b/src/catchup/ApplyBucketsWork.cpp
index 8a871b5f31..cd89b5e79b 100644
--- a/src/catchup/ApplyBucketsWork.cpp
+++ b/src/catchup/ApplyBucketsWork.cpp
@@ -51,45 +51,30 @@ class TempLedgerVersionSetter : NonMovableOrCopyable
     }
 };
 
-uint32_t
-ApplyBucketsWork::startingLevel()
-{
-    return mApp.getConfig().isUsingBucketListDB() ? 0
-                                                  : BucketList::kNumLevels - 1;
-}
-
 ApplyBucketsWork::ApplyBucketsWork(
     Application& app,
-    std::map<std::string, std::shared_ptr<Bucket>> const& buckets,
-    HistoryArchiveState const& applyState, uint32_t maxProtocolVersion,
-    std::function<bool(LedgerEntryType)> onlyApply)
+    std::map<std::string, std::shared_ptr<LiveBucket>> const& buckets,
+    HistoryArchiveState const& applyState, uint32_t maxProtocolVersion)
     : Work(app, "apply-buckets", BasicWork::RETRY_NEVER)
     , mBuckets(buckets)
     , mApplyState(applyState)
-    , mEntryTypeFilter(onlyApply)
     , mTotalSize(0)
-    , mLevel(startingLevel())
+    , mLevel(0)
     , mMaxProtocolVersion(maxProtocolVersion)
     , mCounters(app.getClock().now())
+    , mIsApplyInvariantEnabled(
+          app.getInvariantManager().isBucketApplyInvariantEnabled())
 {
 }
 
-ApplyBucketsWork::ApplyBucketsWork(
-    Application& app,
-    std::map<std::string, std::shared_ptr<Bucket>> const& buckets,
-    HistoryArchiveState const& applyState, uint32_t maxProtocolVersion)
-    : ApplyBucketsWork(app, buckets, applyState, maxProtocolVersion,
-                       [](LedgerEntryType) { return true; })
-{
-}
-
-std::shared_ptr<Bucket>
+std::shared_ptr<LiveBucket>
 ApplyBucketsWork::getBucket(std::string const& hash)
 {
     auto i = mBuckets.find(hash);
-    auto b = (i != mBuckets.end())
-                 ? i->second
-                 : mApp.getBucketManager().getBucketByHash(hexToBin256(hash));
+    auto b =
+        (i != mBuckets.end())
+            ? i->second
+            : mApp.getBucketManager().getLiveBucketByHash(hexToBin256(hash));
     releaseAssert(b);
     return b;
 }
@@ -109,40 +94,19 @@ ApplyBucketsWork::doReset()
     mLastPos = 0;
     mBucketToApplyIndex = 0;
     mMinProtocolVersionSeen = UINT32_MAX;
+    mSeenKeysBeforeApply.clear();
     mSeenKeys.clear();
     mBucketsToApply.clear();
     mBucketApplicator.reset();
 
     if (!isAborting())
     {
-        if (mApp.getConfig().isUsingBucketListDB())
-        {
-            // The current size of this set is 1.6 million during BucketApply
-            // (as of 12/20/23). There's not a great way to estimate this, so
-            // reserving with some extra wiggle room
-            mSeenKeys.reserve(2'000'000);
-        }
+        // The current size of this set is 1.6 million during BucketApply
+        // (as of 12/20/23). There's not a great way to estimate this, so
+        // we reserve with some extra wiggle room.
+        mSeenKeys.reserve(2'000'000);
 
-        // When applying buckets with accounts, we have to make sure that the
-        // root account has been removed. This comes into play, for example,
-        // when applying buckets from genesis the root account already exists.
-        if (mEntryTypeFilter(ACCOUNT))
-        {
-            TempLedgerVersionSetter tlvs(mApp, mMaxProtocolVersion);
-            {
-                SecretKey skey = SecretKey::fromSeed(mApp.getNetworkID());
-
-                LedgerTxn ltx(mApp.getLedgerTxnRoot());
-                auto rootAcc = loadAccount(ltx, skey.getPublicKey());
-                if (rootAcc)
-                {
-                    rootAcc.erase();
-                }
-                ltx.commit();
-            }
-        }
-
-        auto addBucket = [this](std::shared_ptr<Bucket> const& bucket) {
+        auto addBucket = [this](std::shared_ptr<LiveBucket> const& bucket) {
             if (bucket->getSize() > 0)
             {
                 mTotalBuckets++;
@@ -150,30 +114,16 @@ ApplyBucketsWork::doReset()
             }
             mBucketsToApply.emplace_back(bucket);
         };
-        // If using bucketlist DB, we iterate through the BucketList in order
-        // (i.e. L0 curr, L0 snap, L1 curr, etc) as we are just applying offers
-        // (and can keep track of all seen keys). Otherwise, we iterate in
-        // reverse order (i.e. L N snap, L N curr, L N-1 snap, etc.) as we are
-        // applying all entry types and cannot keep track of all seen keys as it
-        // would be too large.
-        if (mApp.getConfig().isUsingBucketListDB())
-        {
-            for (auto const& hsb : mApplyState.currentBuckets)
-            {
-                addBucket(getBucket(hsb.curr));
-                addBucket(getBucket(hsb.snap));
-            }
-        }
-        else
+
+        // We iterate through the live BucketList in order (i.e. L0 curr,
+        // L0 snap, L1 curr, etc.) since we are only applying offers and can
+        // keep track of all seen keys.
+        for (auto const& hsb : mApplyState.currentBuckets)
         {
-            for (auto iter = mApplyState.currentBuckets.rbegin();
-                 iter != mApplyState.currentBuckets.rend(); ++iter)
-            {
-                auto const& hsb = *iter;
-                addBucket(getBucket(hsb.snap));
-                addBucket(getBucket(hsb.curr));
-            }
+            addBucket(getBucket(hsb.curr));
+            addBucket(getBucket(hsb.snap));
         }
+
         // estimate the number of ledger entries contained in those buckets
         // use accounts as a rough approximator as to overestimate a bit
         // (default BucketEntry contains a default AccountEntry)
@@ -198,11 +148,19 @@ ApplyBucketsWork::startBucket()
     ZoneScoped;
     auto bucket = mBucketsToApply.at(mBucketToApplyIndex);
     mMinProtocolVersionSeen =
-        std::min(mMinProtocolVersionSeen, Bucket::getBucketVersion(bucket));
+        std::min(mMinProtocolVersionSeen, bucket->getBucketVersion());
+
+    // Take a snapshot of the seen keys before applying the bucket, but only
+    // if invariants are enabled, since the copy is expensive.
+    if (mIsApplyInvariantEnabled)
+    {
+        mSeenKeysBeforeApply = mSeenKeys;
+    }
+
     // Create a new applicator for the bucket.
     mBucketApplicator = std::make_unique<BucketApplicator>(
         mApp, mMaxProtocolVersion, mMinProtocolVersionSeen, mLevel, bucket,
-        mEntryTypeFilter, mSeenKeys);
+        mSeenKeys);
 }
 
 void
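
The `mIsApplyInvariantEnabled` flag cached in the constructor and consulted in `startBucket()` gates an O(n) copy of a set that, per the comment earlier in this file, can hold roughly two million keys. A standalone sketch of that gate (hypothetical `InvariantGate` type, keys reduced to strings):

```cpp
#include <string>
#include <unordered_set>

struct InvariantGate
{
    bool const mEnabled; // cached once, like mIsApplyInvariantEnabled
    std::unordered_set<std::string> mSeen;
    std::unordered_set<std::string> mSeenBeforeApply;

    explicit InvariantGate(bool enabled) : mEnabled(enabled)
    {
    }

    void
    beforeApply()
    {
        if (mEnabled)
        {
            // O(n) copy of a potentially multi-million-key set; skipped
            // entirely when the invariant will never read it.
            mSeenBeforeApply = mSeen;
        }
    }
};
```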
@@ -213,54 +171,36 @@ ApplyBucketsWork::prepareForNextBucket()
     mApp.getCatchupManager().bucketsApplied();
     mBucketToApplyIndex++;
     // If mBucketToApplyIndex is even, we are progressing to the next
-    // level, if we are using BucketListDB, this is the next greater
-    // level, otherwise it's the next lower level.
+    // level.
     if (mBucketToApplyIndex % 2 == 0)
     {
-        mLevel =
-            mApp.getConfig().isUsingBucketListDB() ? mLevel + 1 : mLevel - 1;
+        ++mLevel;
     }
 }
 
-// We iterate through the BucketList either in-order (level 0 curr, level 0
-// snap, level 1 curr, etc) when only applying offers, or in reverse order
-// (level 9 curr, level 8 snap, level 8 curr, etc) when applying all entry
-// types. When only applying offers, we keep track of the keys we have already
+// We iterate through the live BucketList in order (level 0 curr, level 0
+// snap, level 1 curr, etc.). We keep track of the keys we have already
 // seen, and only apply an entry to the DB if it has not been seen before. This
 // allows us to perform a single write to the DB and ensure that only the newest
 // version is written.
 //
-// When applying all entry types, this seen keys set would be too large. Since
-// there can be no seen keys set, if we were to apply every entry in order, we
-// would overwrite the newest version of an entry with an older version as we
-// iterate through the BucketList. Due to this, we iterate in reverse order such
-// that the newest version of a key is written last, overwriting the older
-// versions. This is much slower due to DB churn.
-
 BasicWork::State
 ApplyBucketsWork::doWork()
 {
     ZoneScoped;
 
     // Step 1: index buckets. Step 2: apply buckets. Step 3: assume state
-    bool isUsingBucketListDB = mApp.getConfig().isUsingBucketListDB();
-    if (isUsingBucketListDB)
+    if (!mIndexBucketsWork)
     {
-        // Step 1: index buckets.
-        if (!mIndexBucketsWork)
-        {
-            // Spawn indexing work for the first time
-            mIndexBucketsWork = addWork<IndexBucketsWork>(mBucketsToApply);
-            return State::WORK_RUNNING;
-        }
-        else if (mIndexBucketsWork->getState() !=
-                 BasicWork::State::WORK_SUCCESS)
-        {
-            // Exit early if indexing work is still running, or failed
-            return mIndexBucketsWork->getState();
-        }
+        // Spawn indexing work for the first time
+        mIndexBucketsWork = addWork<IndexBucketsWork>(mBucketsToApply);
+        return State::WORK_RUNNING;
+    }
 
-        // Otherwise, continue with next steps
+    else if (mIndexBucketsWork->getState() != BasicWork::State::WORK_SUCCESS)
+    {
+        // Exit early if indexing work is still running, or failed
+        return mIndexBucketsWork->getState();
     }
 
     if (!mAssumeStateWork)
@@ -280,8 +220,7 @@ ApplyBucketsWork::doWork()
             }
         }
 
-        auto isCurr = isUsingBucketListDB ? mBucketToApplyIndex % 2 == 0
-                                          : mBucketToApplyIndex % 2 == 1;
+        auto isCurr = mBucketToApplyIndex % 2 == 0;
         if (mBucketApplicator)
         {
             TempLedgerVersionSetter tlvs(mApp, mMaxProtocolVersion);
@@ -292,10 +231,13 @@ ApplyBucketsWork::doWork()
                 return State::WORK_RUNNING;
             }
             // Application complete, check invariants and prepare for next
-            // bucket.
+            // bucket. Applying a bucket updates mSeenKeys with the keys applied
+            // by that bucket, so we need to provide a copy of the keys before
+            // application to the invariant check.
             mApp.getInvariantManager().checkOnBucketApply(
                 mBucketsToApply.at(mBucketToApplyIndex),
-                mApplyState.currentLedger, mLevel, isCurr, mEntryTypeFilter);
+                mApplyState.currentLedger, mLevel, isCurr,
+                mSeenKeysBeforeApply);
             prepareForNextBucket();
         }
         if (!appliedAllBuckets())
@@ -365,8 +307,7 @@ ApplyBucketsWork::getStatus() const
 {
     // This status string only applies to step 2 when we actually apply the
     // buckets.
-    bool doneIndexing = !mApp.getConfig().isUsingBucketListDB() ||
-                        (mIndexBucketsWork && mIndexBucketsWork->isDone());
+    bool doneIndexing = mIndexBucketsWork && mIndexBucketsWork->isDone();
     if (doneIndexing && !mSpawnedAssumeStateWork)
     {
         auto size = mTotalSize == 0 ? 0 : (100 * mAppliedSize / mTotalSize);
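
With the SQL reverse-order path gone, `ApplyBucketsWork` always walks the live BucketList newest-first (L0 curr, L0 snap, L1 curr, ...) and relies on first-seen-wins deduplication so each key is written exactly once; the `mBucketToApplyIndex % 2 == 0` parity is what distinguishes curr from snap buckets. A self-contained sketch of that loop, with hypothetical `SketchBucket`/`writeToDb` stand-ins for the real types:

```cpp
#include <cstddef>
#include <memory>
#include <string>
#include <unordered_set>
#include <vector>

struct SketchEntry
{
    std::string key;
    int version;
};

struct SketchBucket
{
    std::vector<SketchEntry> entries;
};

// Hypothetical sink standing in for the real DB write.
static void
writeToDb(SketchEntry const& e, bool isCurr)
{
    (void)e;
    (void)isCurr;
}

// Buckets arrive newest-first: L0 curr, L0 snap, L1 curr, L1 snap, ...
static void
applyInOrder(std::vector<std::shared_ptr<SketchBucket>> const& buckets)
{
    std::unordered_set<std::string> seen;
    for (std::size_t i = 0; i < buckets.size(); ++i)
    {
        bool isCurr = (i % 2 == 0); // even index: curr, odd index: snap
        for (auto const& e : buckets[i]->entries)
        {
            // The first sighting of a key is its newest version, so later
            // (older) sightings are skipped and each key is written once.
            if (seen.insert(e.key).second)
            {
                writeToDb(e, isCurr);
            }
        }
    }
}
```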
diff --git a/src/catchup/ApplyBucketsWork.h b/src/catchup/ApplyBucketsWork.h
index 276e4caa05..bdff18bed1 100644
--- a/src/catchup/ApplyBucketsWork.h
+++ b/src/catchup/ApplyBucketsWork.h
@@ -12,8 +12,7 @@ namespace stellar
 {
 
 class AssumeStateWork;
-class BucketLevel;
-class BucketList;
+class LiveBucketList;
 class Bucket;
 class IndexBucketsWork;
 struct HistoryArchiveState;
@@ -21,9 +20,8 @@ struct LedgerHeaderHistoryEntry;
 
 class ApplyBucketsWork : public Work
 {
-    std::map<std::string, std::shared_ptr<Bucket>> const& mBuckets;
+    std::map<std::string, std::shared_ptr<LiveBucket>> const& mBuckets;
     HistoryArchiveState const& mApplyState;
-    std::function<bool(LedgerEntryType)> mEntryTypeFilter;
 
     bool mSpawnedAssumeStateWork{false};
     std::shared_ptr<AssumeStateWork> mAssumeStateWork{};
@@ -39,17 +37,18 @@ class ApplyBucketsWork : public Work
     uint32_t mLevel{0};
     uint32_t mMaxProtocolVersion{0};
     uint32_t mMinProtocolVersionSeen{UINT32_MAX};
+    std::unordered_set<LedgerKey> mSeenKeysBeforeApply;
     std::unordered_set<LedgerKey> mSeenKeys;
-    std::vector<std::shared_ptr<Bucket>> mBucketsToApply;
+    std::vector<std::shared_ptr<LiveBucket>> mBucketsToApply;
     std::unique_ptr<BucketApplicator> mBucketApplicator;
     bool mDelayChecked{false};
 
     BucketApplicator::Counters mCounters;
+    bool const mIsApplyInvariantEnabled;
 
     void advance(std::string const& name, BucketApplicator& applicator);
-    std::shared_ptr<Bucket> getBucket(std::string const& bucketHash);
+    std::shared_ptr<LiveBucket> getBucket(std::string const& bucketHash);
 
-    uint32_t startingLevel();
     bool appliedAllBuckets() const;
     void startBucket();
     void prepareForNextBucket();
@@ -57,13 +56,8 @@ class ApplyBucketsWork : public Work
   public:
     ApplyBucketsWork(
         Application& app,
-        std::map<std::string, std::shared_ptr<Bucket>> const& buckets,
+        std::map<std::string, std::shared_ptr<LiveBucket>> const& buckets,
         HistoryArchiveState const& applyState, uint32_t maxProtocolVersion);
-    ApplyBucketsWork(
-        Application& app,
-        std::map<std::string, std::shared_ptr<Bucket>> const& buckets,
-        HistoryArchiveState const& applyState, uint32_t maxProtocolVersion,
-        std::function<bool(LedgerEntryType)> onlyApply);
     ~ApplyBucketsWork() = default;
 
     std::string getStatus() const override;
diff --git a/src/catchup/ApplyBufferedLedgersWork.cpp b/src/catchup/ApplyBufferedLedgersWork.cpp
index 6af378daf4..72d396c5a2 100644
--- a/src/catchup/ApplyBufferedLedgersWork.cpp
+++ b/src/catchup/ApplyBufferedLedgersWork.cpp
@@ -59,7 +59,7 @@ ApplyBufferedLedgersWork::onRun()
     auto applyLedger = std::make_shared<ApplyLedgerWork>(mApp, lcd);
 
     auto predicate = [](Application& app) {
-        auto& bl = app.getBucketManager().getBucketList();
+        auto& bl = app.getBucketManager().getLiveBucketList();
         auto& lm = app.getLedgerManager();
         bl.resolveAnyReadyFutures();
         return bl.futuresAllResolved(
diff --git a/src/catchup/ApplyCheckpointWork.cpp b/src/catchup/ApplyCheckpointWork.cpp
index 86f6bf01b5..ad51ded4d0 100644
--- a/src/catchup/ApplyCheckpointWork.cpp
+++ b/src/catchup/ApplyCheckpointWork.cpp
@@ -311,7 +311,7 @@ ApplyCheckpointWork::onRun()
     auto applyLedger = std::make_shared<ApplyLedgerWork>(mApp, *lcd);
 
     auto predicate = [](Application& app) {
-        auto& bl = app.getBucketManager().getBucketList();
+        auto& bl = app.getBucketManager().getLiveBucketList();
         auto& lm = app.getLedgerManager();
         bl.resolveAnyReadyFutures();
         return bl.futuresAllResolved(
diff --git a/src/catchup/AssumeStateWork.cpp b/src/catchup/AssumeStateWork.cpp
index e12ed8ac98..1305ccc711 100644
--- a/src/catchup/AssumeStateWork.cpp
+++ b/src/catchup/AssumeStateWork.cpp
@@ -26,12 +26,12 @@ AssumeStateWork::AssumeStateWork(Application& app,
     // Maintain reference to all Buckets in HAS to avoid garbage collection,
     // including future buckets that have already finished merging
     auto& bm = mApp.getBucketManager();
-    for (uint32_t i = 0; i < BucketList::kNumLevels; ++i)
+    for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i)
     {
         auto curr =
-            bm.getBucketByHash(hexToBin256(mHas.currentBuckets.at(i).curr));
+            bm.getLiveBucketByHash(hexToBin256(mHas.currentBuckets.at(i).curr));
         auto snap =
-            bm.getBucketByHash(hexToBin256(mHas.currentBuckets.at(i).snap));
+            bm.getLiveBucketByHash(hexToBin256(mHas.currentBuckets.at(i).snap));
         if (!(curr && snap))
         {
             throw std::runtime_error("Missing bucket files while "
@@ -44,7 +44,7 @@ AssumeStateWork::AssumeStateWork(Application& app,
         if (nextFuture.hasOutputHash())
         {
             auto nextBucket =
-                bm.getBucketByHash(hexToBin256(nextFuture.getOutputHash()));
+                bm.getLiveBucketByHash(hexToBin256(nextFuture.getOutputHash()));
             if (!nextBucket)
             {
                 throw std::runtime_error("Missing future bucket files while "
@@ -64,10 +64,7 @@ AssumeStateWork::doWork()
         std::vector<std::shared_ptr<BasicWork>> seq;
 
         // Index Bucket files
-        if (mApp.getConfig().isUsingBucketListDB())
-        {
-            seq.push_back(std::make_shared<IndexBucketsWork>(mApp, mBuckets));
-        }
+        seq.push_back(std::make_shared<IndexBucketsWork>(mApp, mBuckets));
 
         // Add bucket files to BucketList and restart merges
         auto assumeStateCB = [&has = mHas,
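
The constructor change above preserves the "hold a strong reference so garbage collection cannot delete bucket files mid-work" pattern for live buckets. In miniature (hypothetical types), the pattern is just ownership via `shared_ptr`:

```cpp
#include <memory>
#include <vector>

struct SketchBucketFile
{
    // Owns an on-disk file; the file is deleted when the last
    // shared_ptr referencing it goes away.
};

class SketchWork
{
    // Holding shared_ptrs here keeps use_count above zero for the whole
    // lifetime of the work, so reference-counted bucket garbage collection
    // cannot remove the files out from under the indexing step.
    std::vector<std::shared_ptr<SketchBucketFile>> mRetained;

  public:
    void
    retain(std::shared_ptr<SketchBucketFile> b)
    {
        mRetained.emplace_back(std::move(b));
    }
};
```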
diff --git a/src/catchup/AssumeStateWork.h b/src/catchup/AssumeStateWork.h
index 689cc1f1f6..92dc4b903c 100644
--- a/src/catchup/AssumeStateWork.h
+++ b/src/catchup/AssumeStateWork.h
@@ -11,6 +11,7 @@ namespace stellar
 
 class Bucket;
 struct HistoryArchiveState;
+class LiveBucket;
 
 class AssumeStateWork : public Work
 {
@@ -21,7 +22,7 @@ class AssumeStateWork : public Work
 
     // Keep strong reference to buckets in HAS so they are not garbage
     // collected during indexing
-    std::vector<std::shared_ptr<Bucket>> mBuckets{};
+    std::vector<std::shared_ptr<LiveBucket>> mBuckets{};
 
   public:
     AssumeStateWork(Application& app, HistoryArchiveState const& has,
diff --git a/src/catchup/CatchupManager.h b/src/catchup/CatchupManager.h
index 61c9b5821f..46318c5e98 100644
--- a/src/catchup/CatchupManager.h
+++ b/src/catchup/CatchupManager.h
@@ -63,7 +63,7 @@ class CatchupManager
     virtual void
     startCatchup(CatchupConfiguration configuration,
                  std::shared_ptr<HistoryArchive> archive,
-                 std::set<std::shared_ptr<Bucket>> bucketsToRetain) = 0;
+                 std::set<std::shared_ptr<LiveBucket>> bucketsToRetain) = 0;
 
     // Return status of catchup for or empty string, if no catchup in progress
     virtual std::string getStatus() const = 0;
diff --git a/src/catchup/CatchupManagerImpl.cpp b/src/catchup/CatchupManagerImpl.cpp
index b1eca69dd7..11db47260f 100644
--- a/src/catchup/CatchupManagerImpl.cpp
+++ b/src/catchup/CatchupManagerImpl.cpp
@@ -238,7 +238,7 @@ CatchupManagerImpl::processLedger(LedgerCloseData const& ledgerData)
 void
 CatchupManagerImpl::startCatchup(
     CatchupConfiguration configuration, std::shared_ptr<HistoryArchive> archive,
-    std::set<std::shared_ptr<Bucket>> bucketsToRetain)
+    std::set<std::shared_ptr<LiveBucket>> bucketsToRetain)
 {
     ZoneScoped;
     auto lastClosedLedger = mApp.getLedgerManager().getLastClosedLedgerNum();
diff --git a/src/catchup/CatchupManagerImpl.h b/src/catchup/CatchupManagerImpl.h
index 8c04a344aa..90e57bcbd7 100644
--- a/src/catchup/CatchupManagerImpl.h
+++ b/src/catchup/CatchupManagerImpl.h
@@ -62,10 +62,10 @@ class CatchupManagerImpl : public CatchupManager
     ~CatchupManagerImpl() override;
 
     void processLedger(LedgerCloseData const& ledgerData) override;
-    void
-    startCatchup(CatchupConfiguration configuration,
-                 std::shared_ptr<HistoryArchive> archive,
-                 std::set<std::shared_ptr<Bucket>> bucketsToRetain) override;
+    void startCatchup(
+        CatchupConfiguration configuration,
+        std::shared_ptr<HistoryArchive> archive,
+        std::set<std::shared_ptr<LiveBucket>> bucketsToRetain) override;
 
     std::string getStatus() const override;
 
diff --git a/src/catchup/CatchupWork.cpp b/src/catchup/CatchupWork.cpp
index 760c15436c..b854e80414 100644
--- a/src/catchup/CatchupWork.cpp
+++ b/src/catchup/CatchupWork.cpp
@@ -77,7 +77,7 @@ setHerderStateTo(FileTransferInfo const& ft, uint32_t ledger, Application& app)
 
 CatchupWork::CatchupWork(Application& app,
                          CatchupConfiguration catchupConfiguration,
-                         std::set<std::shared_ptr<Bucket>> bucketsToRetain,
+                         std::set<std::shared_ptr<LiveBucket>> bucketsToRetain,
                          std::shared_ptr<HistoryArchive> archive)
     : Work(app, "catchup", BasicWork::RETRY_NEVER)
     , mLocalState{app.getLedgerManager().getLastClosedLedgerHAS()}
@@ -214,10 +214,7 @@ CatchupWork::downloadApplyBuckets()
     // the database. This guarantees that we clear that state the next time
     // the application starts.
     auto& ps = mApp.getPersistentState();
-    for (auto let : xdr::xdr_traits<LedgerEntryType>::enum_values())
-    {
-        ps.setRebuildForType(static_cast<LedgerEntryType>(let));
-    }
+    ps.setRebuildForOfferTable();
 
     std::vector<std::shared_ptr<BasicWork>> seq;
     auto version = mApp.getConfig().LEDGER_PROTOCOL_VERSION;
@@ -245,20 +242,8 @@ CatchupWork::downloadApplyBuckets()
         version = mVerifiedLedgerRangeStart.header.ledgerVersion;
     }
 
-    std::shared_ptr<ApplyBucketsWork> applyBuckets;
-    if (mApp.getConfig().isUsingBucketListDB())
-    {
-        // Only apply unsupported BucketListDB types to SQL DB when BucketList
-        // lookup is enabled
-        applyBuckets = std::make_shared<ApplyBucketsWork>(
-            mApp, mBuckets, *mBucketHAS, version,
-            BucketIndex::typeNotSupported);
-    }
-    else
-    {
-        applyBuckets = std::make_shared<ApplyBucketsWork>(mApp, mBuckets,
-                                                          *mBucketHAS, version);
-    }
+    auto applyBuckets = std::make_shared<ApplyBucketsWork>(
+        mApp, mBuckets, *mBucketHAS, version);
     seq.push_back(applyBuckets);
     return std::make_shared<WorkSequence>(mApp, "download-verify-apply-buckets",
                                           seq, RETRY_NEVER);
@@ -531,10 +516,7 @@ CatchupWork::runCatchupStep()
                 // persistently available locally so it will return us to the
                 // correct state.
                 auto& ps = mApp.getPersistentState();
-                for (auto let : xdr::xdr_traits<LedgerEntryType>::enum_values())
-                {
-                    ps.clearRebuildForType(static_cast<LedgerEntryType>(let));
-                }
+                ps.clearRebuildForOfferTable();
             }
         }
         else if (mTransactionsVerifyApplySeq)
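
`setRebuildForOfferTable()` and `clearRebuildForOfferTable()` replace the old per-`LedgerEntryType` loops, but the crash-safety protocol is unchanged: mark the offer table dirty before applying buckets, and clear the mark only after the whole sequence succeeds. A minimal sketch with a stand-in for `PersistentState`:

```cpp
#include <functional>

// Stand-in for the real PersistentState, just for this sketch.
struct PersistentStateSketch
{
    bool mRebuildOffers{false};
    void
    setRebuildForOfferTable()
    {
        mRebuildOffers = true;
    }
    void
    clearRebuildForOfferTable()
    {
        mRebuildOffers = false;
    }
};

static void
applyBucketsCrashSafe(PersistentStateSketch& ps,
                      std::function<bool()> const& applyAll)
{
    // Mark the SQL offer table dirty *before* touching it...
    ps.setRebuildForOfferTable();
    if (applyAll())
    {
        // ...and clear the mark only once every bucket applied cleanly.
        ps.clearRebuildForOfferTable();
    }
    // If the process dies mid-apply, the mark persists and the next
    // startup rebuilds the offer table from the BucketList.
}
```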
diff --git a/src/catchup/CatchupWork.h b/src/catchup/CatchupWork.h
index ed36c75f5c..d650bbc910 100644
--- a/src/catchup/CatchupWork.h
+++ b/src/catchup/CatchupWork.h
@@ -47,7 +47,7 @@ class CatchupWork : public Work
   protected:
     HistoryArchiveState mLocalState;
     std::unique_ptr<TmpDir> mDownloadDir;
-    std::map<std::string, std::shared_ptr<Bucket>> mBuckets;
+    std::map<std::string, std::shared_ptr<LiveBucket>> mBuckets;
 
     void doReset() override;
     BasicWork::State doWork() override;
@@ -65,7 +65,7 @@ class CatchupWork : public Work
     static uint32_t const PUBLISH_QUEUE_MAX_SIZE;
 
     CatchupWork(Application& app, CatchupConfiguration catchupConfiguration,
-                std::set<std::shared_ptr<Bucket>> bucketsToRetain,
+                std::set<std::shared_ptr<LiveBucket>> bucketsToRetain,
                 std::shared_ptr<HistoryArchive> archive = nullptr);
     virtual ~CatchupWork();
     std::string getStatus() const override;
@@ -128,6 +128,6 @@ class CatchupWork : public Work
 
     std::optional<HistoryArchiveState> mHAS;
     std::optional<HistoryArchiveState> mBucketHAS;
-    std::set<std::shared_ptr<Bucket>> mRetainedBuckets;
+    std::set<std::shared_ptr<LiveBucket>> mRetainedBuckets;
 };
 }
diff --git a/src/catchup/DownloadApplyTxsWork.cpp b/src/catchup/DownloadApplyTxsWork.cpp
index 1746060d69..b91a9f7f9f 100644
--- a/src/catchup/DownloadApplyTxsWork.cpp
+++ b/src/catchup/DownloadApplyTxsWork.cpp
@@ -83,7 +83,7 @@ DownloadApplyTxsWork::yieldMoreWork()
     auto maybeWaitForMerges = [](Application& app) {
         if (app.getConfig().CATCHUP_WAIT_MERGES_TX_APPLY_FOR_TESTING)
         {
-            auto& bl = app.getBucketManager().getBucketList();
+            auto& bl = app.getBucketManager().getLiveBucketList();
             bl.resolveAnyReadyFutures();
             return bl.futuresAllResolved();
         }
diff --git a/src/catchup/IndexBucketsWork.cpp b/src/catchup/IndexBucketsWork.cpp
index 5019b48757..32eb6598b3 100644
--- a/src/catchup/IndexBucketsWork.cpp
+++ b/src/catchup/IndexBucketsWork.cpp
@@ -15,7 +15,7 @@
 namespace stellar
 {
 IndexBucketsWork::IndexWork::IndexWork(Application& app,
-                                       std::shared_ptr<Bucket> b)
+                                       std::shared_ptr<LiveBucket> b)
     : BasicWork(app, "index-work", BasicWork::RETRY_NEVER), mBucket(b)
 {
 }
@@ -57,7 +57,7 @@ IndexBucketsWork::IndexWork::postWork()
             auto indexFilename =
                 bm.bucketIndexFilename(self->mBucket->getHash());
 
-            if (bm.getConfig().isPersistingBucketListDBIndexes() &&
+            if (bm.getConfig().BUCKETLIST_DB_PERSIST_INDEX &&
                 fs::exists(indexFilename))
             {
                 self->mIndex = BucketIndex::load(bm, indexFilename,
@@ -80,7 +80,8 @@ IndexBucketsWork::IndexWork::postWork()
 
             if (!self->mIndex)
             {
-                self->mIndex = BucketIndex::createIndex(
+                // TODO: Fix this when archive BucketLists assume state
+                self->mIndex = BucketIndex::createIndex<BucketEntry>(
                     bm, self->mBucket->getFilename(), self->mBucket->getHash());
             }
 
@@ -104,7 +105,7 @@ IndexBucketsWork::IndexWork::postWork()
 }
 
 IndexBucketsWork::IndexBucketsWork(
-    Application& app, std::vector<std::shared_ptr<Bucket>> const& buckets)
+    Application& app, std::vector<std::shared_ptr<LiveBucket>> const& buckets)
     : Work(app, "index-bucketList", BasicWork::RETRY_NEVER), mBuckets(buckets)
 {
 }
@@ -130,7 +131,7 @@ void
 IndexBucketsWork::spawnWork()
 {
     UnorderedSet<Hash> indexedBuckets;
-    auto spawnIndexWork = [&](std::shared_ptr<Bucket> const& b) {
+    auto spawnIndexWork = [&](std::shared_ptr<LiveBucket> const& b) {
         // Don't index empty bucket or buckets that are already being
         // indexed. Sometimes one level's snap bucket may be another
         // level's future bucket. The indexing job may have started but
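
`IndexWork::postWork()` now checks `BUCKETLIST_DB_PERSIST_INDEX` directly, but the control flow is still load-the-persisted-index-or-build-one. Reduced to a sketch (hypothetical `loadIndex`/`buildIndex` stubs; the real calls are `BucketIndex::load` and `BucketIndex::createIndex<BucketEntry>`):

```cpp
#include <filesystem>
#include <memory>
#include <string>

struct IndexSketch
{
};

static std::unique_ptr<IndexSketch>
loadIndex(std::string const&)
{
    return nullptr; // stub: pretend the on-disk index failed to load
}

static std::unique_ptr<IndexSketch>
buildIndex(std::string const&)
{
    return std::make_unique<IndexSketch>(); // stub: full bucket scan
}

static std::unique_ptr<IndexSketch>
loadOrBuild(bool persistEnabled, std::string const& indexFile,
            std::string const& bucketFile)
{
    std::unique_ptr<IndexSketch> idx;
    if (persistEnabled && std::filesystem::exists(indexFile))
    {
        idx = loadIndex(indexFile); // reuse a persisted index when allowed
    }
    if (!idx)
    {
        idx = buildIndex(bucketFile); // otherwise fall back to a fresh scan
    }
    return idx;
}
```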
diff --git a/src/catchup/IndexBucketsWork.h b/src/catchup/IndexBucketsWork.h
index 65a0f0e18a..ed44289c4e 100644
--- a/src/catchup/IndexBucketsWork.h
+++ b/src/catchup/IndexBucketsWork.h
@@ -13,33 +13,34 @@ namespace stellar
 class Bucket;
 class BucketIndex;
 class BucketManager;
+class LiveBucket;
 
 class IndexBucketsWork : public Work
 {
     class IndexWork : public BasicWork
     {
-        std::shared_ptr<Bucket> mBucket;
+        std::shared_ptr<LiveBucket> mBucket;
         std::unique_ptr<BucketIndex const> mIndex;
         bool mDone{false};
 
         void postWork();
 
       public:
-        IndexWork(Application& app, std::shared_ptr<Bucket> b);
+        IndexWork(Application& app, std::shared_ptr<LiveBucket> b);
 
       protected:
         State onRun() override;
         bool onAbort() override;
     };
 
-    std::vector<std::shared_ptr<Bucket>> const& mBuckets;
+    std::vector<std::shared_ptr<LiveBucket>> const& mBuckets;
 
     bool mWorkSpawned{false};
     void spawnWork();
 
   public:
     IndexBucketsWork(Application& app,
-                     std::vector<std::shared_ptr<Bucket>> const& buckets);
+                     std::vector<std::shared_ptr<LiveBucket>> const& buckets);
 
   protected:
     State doWork() override;
diff --git a/src/database/Database.cpp b/src/database/Database.cpp
index e06f1ff016..433df01bff 100644
--- a/src/database/Database.cpp
+++ b/src/database/Database.cpp
@@ -248,13 +248,9 @@ Database::upgradeToCurrentSchema()
         putSchemaVersion(vers);
     }
 
-    // While not really a schema upgrade, we need to upgrade the DB when
-    // BucketListDB is enabled.
-    if (mApp.getConfig().isUsingBucketListDB())
-    {
-        // Tx meta column no longer supported in BucketListDB
-        dropTxMetaIfExists();
-    }
+    // Tx meta column no longer supported
+    dropTxMetaIfExists();
+    maybeUpgradeToBucketListDB();
 
     CLOG_INFO(Database, "DB schema is in current version");
     releaseAssert(vers == SCHEMA_VERSION);
@@ -294,6 +290,50 @@ Database::dropTxMetaIfExists()
     }
 }
 
+void
+Database::maybeUpgradeToBucketListDB()
+{
+    if (mApp.getPersistentState().getState(PersistentState::kDBBackend) !=
+        BucketIndex::DB_BACKEND_STATE)
+    {
+        CLOG_INFO(Database, "Upgrading to BucketListDB");
+
+        // Drop all LedgerEntry tables except for offers
+        CLOG_INFO(Database, "Dropping table accounts");
+        getSession() << "DROP TABLE IF EXISTS accounts;";
+
+        CLOG_INFO(Database, "Dropping table signers");
+        getSession() << "DROP TABLE IF EXISTS signers;";
+
+        CLOG_INFO(Database, "Dropping table claimablebalance");
+        getSession() << "DROP TABLE IF EXISTS claimablebalance;";
+
+        CLOG_INFO(Database, "Dropping table configsettings");
+        getSession() << "DROP TABLE IF EXISTS configsettings;";
+
+        CLOG_INFO(Database, "Dropping table contractcode");
+        getSession() << "DROP TABLE IF EXISTS contractcode;";
+
+        CLOG_INFO(Database, "Dropping table contractdata");
+        getSession() << "DROP TABLE IF EXISTS contractdata;";
+
+        CLOG_INFO(Database, "Dropping table accountdata");
+        getSession() << "DROP TABLE IF EXISTS accountdata;";
+
+        CLOG_INFO(Database, "Dropping table liquiditypool");
+        getSession() << "DROP TABLE IF EXISTS liquiditypool;";
+
+        CLOG_INFO(Database, "Dropping table trustlines");
+        getSession() << "DROP TABLE IF EXISTS trustlines;";
+
+        CLOG_INFO(Database, "Dropping table ttl");
+        getSession() << "DROP TABLE IF EXISTS ttl;";
+
+        mApp.getPersistentState().setState(PersistentState::kDBBackend,
+                                           BucketIndex::DB_BACKEND_STATE);
+    }
+}
+
 void
 Database::putSchemaVersion(unsigned long vers)
 {
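
`maybeUpgradeToBucketListDB()` spells out one `DROP TABLE` per legacy table, each with its own log line. For comparison, a behavior-preserving, table-driven sketch of the same drops (per-table logging elided; not part of the patch):

```cpp
#include <soci/soci.h>
#include <string>

static void
dropLegacyLedgerTables(soci::session& sess)
{
    // Offers stay in SQL under BucketListDB, so "offers" is deliberately
    // absent from this list.
    static char const* const kTables[] = {
        "accounts",     "signers",      "claimablebalance", "configsettings",
        "contractcode", "contractdata", "accountdata",      "liquiditypool",
        "trustlines",   "ttl"};
    for (auto const* table : kTables)
    {
        sess << std::string("DROP TABLE IF EXISTS ") + table + ";";
    }
}
```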
diff --git a/src/database/Database.h b/src/database/Database.h
index e3ad43b214..73540c2884 100644
--- a/src/database/Database.h
+++ b/src/database/Database.h
@@ -174,6 +174,7 @@ class Database : NonMovableOrCopyable
     void upgradeToCurrentSchema();
 
     void dropTxMetaIfExists();
+    void maybeUpgradeToBucketListDB();
 
     // Access the underlying SOCI session object
     soci::session& getSession();
diff --git a/src/database/test/DatabaseTests.cpp b/src/database/test/DatabaseTests.cpp
index 4a17cd565c..c2fc838bd3 100644
--- a/src/database/test/DatabaseTests.cpp
+++ b/src/database/test/DatabaseTests.cpp
@@ -72,7 +72,7 @@ transactionTest(Application::pointer app)
 
 TEST_CASE("database smoketest", "[db]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     Application::pointer app = createTestApplication(clock, cfg, true, false);
@@ -81,7 +81,7 @@ TEST_CASE("database smoketest", "[db]")
 
 TEST_CASE("database on-disk smoketest", "[db]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_ON_DISK_SQLITE);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_BUCKET_DB_PERSISTENT);
 
     VirtualClock clock;
     Application::pointer app = createTestApplication(clock, cfg, true, false);
@@ -201,7 +201,7 @@ checkMVCCIsolation(Application::pointer app)
 
 TEST_CASE("sqlite MVCC test", "[db]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_ON_DISK_SQLITE);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_BUCKET_DB_PERSISTENT);
     VirtualClock clock;
     Application::pointer app = createTestApplication(clock, cfg, true, false);
     checkMVCCIsolation(app);
@@ -349,7 +349,7 @@ TEST_CASE("postgres performance", "[db][pgperf][!hide]")
 
 TEST_CASE("schema test", "[db]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     Application::pointer app = createTestApplication(clock, cfg);
diff --git a/src/herder/test/HerderTests.cpp b/src/herder/test/HerderTests.cpp
index 9366adb7c3..0b6dd1d419 100644
--- a/src/herder/test/HerderTests.cpp
+++ b/src/herder/test/HerderTests.cpp
@@ -1135,7 +1135,7 @@ TEST_CASE("surge pricing", "[herder][txset][soroban]")
 {
     SECTION("max 0 ops per ledger")
     {
-        Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+        Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY));
         cfg.TESTING_UPGRADE_MAX_TX_SET_SIZE = 0;
 
         VirtualClock clock;
@@ -2564,11 +2564,6 @@ TEST_CASE("SCP State", "[herder]")
         };
 
     auto doTest = [&](bool forceSCP) {
-        SECTION("sqlite")
-        {
-            configure(Config::TestDbMode::TESTDB_ON_DISK_SQLITE);
-        }
-
         SECTION("bucketlistDB")
         {
             configure(Config::TestDbMode::TESTDB_BUCKET_DB_PERSISTENT);
@@ -3258,7 +3253,7 @@ TEST_CASE("accept soroban txs after network upgrade", "[soroban][herder]")
 
     auto simulation =
         Topologies::core(4, 1, Simulation::OVER_LOOPBACK, networkID, [](int i) {
-            auto cfg = getTestConfig(i, Config::TESTDB_ON_DISK_SQLITE);
+            auto cfg = getTestConfig(i, Config::TESTDB_IN_MEMORY);
             cfg.TESTING_UPGRADE_MAX_TX_SET_SIZE = 100;
             cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION =
                 static_cast<uint32_t>(SOROBAN_PROTOCOL_VERSION) - 1;
@@ -3687,7 +3682,7 @@ herderExternalizesValuesWithProtocol(uint32_t version)
     auto networkID = sha256(getTestConfig().NETWORK_PASSPHRASE);
     auto simulation = std::make_shared<Simulation>(
         Simulation::OVER_LOOPBACK, networkID, [version](int i) {
-            auto cfg = getTestConfig(i, Config::TESTDB_ON_DISK_SQLITE);
+            auto cfg = getTestConfig(i, Config::TESTDB_BUCKET_DB_PERSISTENT);
             cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION = version;
             return cfg;
         });
diff --git a/src/herder/test/UpgradesTests.cpp b/src/herder/test/UpgradesTests.cpp
index 3a1b3adf56..59635fcead 100644
--- a/src/herder/test/UpgradesTests.cpp
+++ b/src/herder/test/UpgradesTests.cpp
@@ -374,7 +374,7 @@ void
 testValidateUpgrades(VirtualClock::system_time_point preferredUpgradeDatetime,
                      bool canBeValid)
 {
-    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION = 10;
     cfg.TESTING_UPGRADE_DESIRED_FEE = 100;
     cfg.TESTING_UPGRADE_MAX_TX_SET_SIZE = 50;
@@ -632,7 +632,7 @@ TEST_CASE("Ledger Manager applies upgrades properly", "[upgrades]")
 TEST_CASE("config upgrade validation", "[upgrades]")
 {
     VirtualClock clock;
-    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     auto app = createTestApplication(clock, cfg);
 
     auto headerTime = VirtualClock::to_time_t(genesis(0, 2));
@@ -828,7 +828,7 @@ TEST_CASE("config upgrade validation", "[upgrades]")
 TEST_CASE("config upgrades applied to ledger", "[soroban][upgrades]")
 {
     VirtualClock clock;
-    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION =
         static_cast<uint32_t>(SOROBAN_PROTOCOL_VERSION) - 1;
     cfg.USE_CONFIG_FOR_GENESIS = false;
@@ -1984,7 +1984,7 @@ TEST_CASE("upgrade to version 11", "[upgrades]")
             app->getConfig().NODE_SEED);
         lm.closeLedger(LedgerCloseData(ledgerSeq, txSet, sv));
         auto& bm = app->getBucketManager();
-        auto& bl = bm.getBucketList();
+        auto& bl = bm.getLiveBucketList();
         while (!bl.futuresAllResolved())
         {
             std::this_thread::sleep_for(std::chrono::milliseconds(10));
@@ -1998,16 +1998,17 @@ TEST_CASE("upgrade to version 11", "[upgrades]")
                   ledgerSeq, mc.mPreInitEntryProtocolMerges,
                   mc.mPostInitEntryProtocolMerges, mc.mNewInitEntries,
                   mc.mOldInitEntries);
-        for (uint32_t level = 0; level < BucketList::kNumLevels; ++level)
+        for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level)
         {
-            auto& lev = bm.getBucketList().getLevel(level);
+            auto& lev = bm.getLiveBucketList().getLevel(level);
             BucketTestUtils::EntryCounts currCounts(lev.getCurr());
             BucketTestUtils::EntryCounts snapCounts(lev.getSnap());
             CLOG_INFO(
                 Bucket,
                 "post-ledger {} close, init counts: level {}, {} in curr, "
                 "{} in snap",
-                ledgerSeq, level, currCounts.nInit, snapCounts.nInit);
+                ledgerSeq, level, currCounts.nInitOrArchived,
+                snapCounts.nInitOrArchived);
         }
         if (ledgerSeq < 5)
         {
@@ -2030,8 +2031,8 @@ TEST_CASE("upgrade to version 11", "[upgrades]")
             //   - From 8 on, the INITENTRYs propagate to lev[1].curr
             REQUIRE(mc.mPreInitEntryProtocolMerges == 5);
             REQUIRE(mc.mPostInitEntryProtocolMerges != 0);
-            auto& lev0 = bm.getBucketList().getLevel(0);
-            auto& lev1 = bm.getBucketList().getLevel(1);
+            auto& lev0 = bm.getLiveBucketList().getLevel(0);
+            auto& lev1 = bm.getLiveBucketList().getLevel(1);
             auto lev0Curr = lev0.getCurr();
             auto lev0Snap = lev0.getSnap();
             auto lev1Curr = lev1.getCurr();
@@ -2039,22 +2040,22 @@ TEST_CASE("upgrade to version 11", "[upgrades]")
             BucketTestUtils::EntryCounts lev0CurrCounts(lev0Curr);
             BucketTestUtils::EntryCounts lev0SnapCounts(lev0Snap);
             BucketTestUtils::EntryCounts lev1CurrCounts(lev1Curr);
-            auto getVers = [](std::shared_ptr<Bucket> b) -> uint32_t {
-                return BucketInputIterator(b).getMetadata().ledgerVersion;
+            auto getVers = [](std::shared_ptr<LiveBucket> b) -> uint32_t {
+                return LiveBucketInputIterator(b).getMetadata().ledgerVersion;
             };
             switch (ledgerSeq)
             {
             default:
             case 8:
                 REQUIRE(getVers(lev1Curr) == newProto);
-                REQUIRE(lev1CurrCounts.nInit != 0);
+                REQUIRE(lev1CurrCounts.nInitOrArchived != 0);
             case 7:
             case 6:
                 REQUIRE(getVers(lev0Snap) == newProto);
-                REQUIRE(lev0SnapCounts.nInit != 0);
+                REQUIRE(lev0SnapCounts.nInitOrArchived != 0);
             case 5:
                 REQUIRE(getVers(lev0Curr) == newProto);
-                REQUIRE(lev0CurrCounts.nInit != 0);
+                REQUIRE(lev0CurrCounts.nInitOrArchived != 0);
             }
         }
     }
@@ -2108,7 +2109,7 @@ TEST_CASE("upgrade to version 12", "[upgrades]")
             app->getConfig().NODE_SEED);
         lm.closeLedger(LedgerCloseData(ledgerSeq, txSet, sv));
         auto& bm = app->getBucketManager();
-        auto& bl = bm.getBucketList();
+        auto& bl = bm.getLiveBucketList();
         while (!bl.futuresAllResolved())
         {
             std::this_thread::sleep_for(std::chrono::milliseconds(10));
@@ -2122,14 +2123,14 @@ TEST_CASE("upgrade to version 12", "[upgrades]")
         }
         else
         {
-            auto& lev0 = bm.getBucketList().getLevel(0);
-            auto& lev1 = bm.getBucketList().getLevel(1);
+            auto& lev0 = bm.getLiveBucketList().getLevel(0);
+            auto& lev1 = bm.getLiveBucketList().getLevel(1);
             auto lev0Curr = lev0.getCurr();
             auto lev0Snap = lev0.getSnap();
             auto lev1Curr = lev1.getCurr();
             auto lev1Snap = lev1.getSnap();
-            auto getVers = [](std::shared_ptr<Bucket> b) -> uint32_t {
-                return BucketInputIterator(b).getMetadata().ledgerVersion;
+            auto getVers = [](std::shared_ptr<LiveBucket> b) -> uint32_t {
+                return LiveBucketInputIterator(b).getMetadata().ledgerVersion;
             };
             switch (ledgerSeq)
             {
@@ -2233,7 +2234,7 @@ TEST_CASE("configuration initialized in version upgrade", "[upgrades]")
         REQUIRE(!ltx.load(getMaxContractSizeKey()));
     }
 
-    auto blSize = app->getBucketManager().getBucketList().getSize();
+    auto blSize = app->getBucketManager().getLiveBucketList().getSize();
     executeUpgrade(*app, makeProtocolVersionUpgrade(
                              static_cast<uint32_t>(SOROBAN_PROTOCOL_VERSION)));
 
@@ -2275,7 +2276,7 @@ TEST_CASE_VERSIONS("upgrade base reserve", "[upgrades]")
 {
     VirtualClock clock;
 
-    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     auto app = createTestApplication(clock, cfg);
 
     auto& lm = app->getLedgerManager();
@@ -2974,7 +2975,7 @@ TEST_CASE("upgrade from cpp14 serialized data", "[upgrades]")
 
 TEST_CASE("upgrades serialization roundtrip", "[upgrades]")
 {
-    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
 
@@ -3058,7 +3059,7 @@ TEST_CASE("upgrades serialization roundtrip", "[upgrades]")
 TEST_CASE_VERSIONS("upgrade flags", "[upgrades][liquiditypool]")
 {
     VirtualClock clock;
-    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     auto app = createTestApplication(clock, cfg);
 
diff --git a/src/history/FileTransferInfo.h b/src/history/FileTransferInfo.h
index 1e66b3f8ab..daeebddcc7 100644
--- a/src/history/FileTransferInfo.h
+++ b/src/history/FileTransferInfo.h
@@ -28,7 +28,7 @@ class FileTransferInfo
     std::string getLocalDir(TmpDir const& localRoot) const;
 
   public:
-    FileTransferInfo(Bucket const& bucket)
+    FileTransferInfo(LiveBucket const& bucket)
         : mType(HISTORY_FILE_TYPE_BUCKET)
         , mHexDigits(binToHex(bucket.getHash()))
         , mLocalPath(bucket.getFilename().string())
diff --git a/src/history/HistoryArchive.cpp b/src/history/HistoryArchive.cpp
index a627b98c81..a2f8992547 100644
--- a/src/history/HistoryArchive.cpp
+++ b/src/history/HistoryArchive.cpp
@@ -246,7 +246,7 @@ HistoryArchiveState::differingBuckets(HistoryArchiveState const& other) const
         inhibit.insert(b.snap);
     }
     std::vector<std::string> ret;
-    for (size_t i = BucketList::kNumLevels; i != 0; --i)
+    for (size_t i = LiveBucketList::kNumLevels; i != 0; --i)
     {
         auto s = currentBuckets[i - 1].snap;
         auto n = s;
@@ -307,12 +307,12 @@ HistoryArchiveState::containsValidBuckets(Application& app) const
     // Process bucket, return version
     auto processBucket = [&](std::string const& bucketHash) {
         auto bucket =
-            app.getBucketManager().getBucketByHash(hexToBin256(bucketHash));
+            app.getBucketManager().getLiveBucketByHash(hexToBin256(bucketHash));
         releaseAssert(bucket);
         int32_t version = 0;
         if (!bucket->isEmpty())
         {
-            version = Bucket::getBucketVersion(bucket);
+            version = bucket->getBucketVersion();
             if (!nonEmptySeen)
             {
                 nonEmptySeen = true;
@@ -322,7 +322,7 @@ HistoryArchiveState::containsValidBuckets(Application& app) const
     };
 
     // Iterate bottom-up, from oldest to newest buckets
-    for (uint32_t j = BucketList::kNumLevels; j != 0; --j)
+    for (uint32_t j = LiveBucketList::kNumLevels; j != 0; --j)
     {
         auto i = j - 1;
         auto const& level = currentBuckets[i];
@@ -358,7 +358,8 @@ HistoryArchiveState::containsValidBuckets(Application& app) const
             continue;
         }
         else if (protocolVersionStartsFrom(
-                     prevSnapVersion, Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
+                     prevSnapVersion,
+                     LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
         {
             if (!level.next.isClear())
             {
@@ -384,16 +385,17 @@ HistoryArchiveState::prepareForPublish(Application& app)
     // Level 0 future buckets are always clear
     releaseAssert(currentBuckets[0].next.isClear());
 
-    for (uint32_t i = 1; i < BucketList::kNumLevels; i++)
+    for (uint32_t i = 1; i < LiveBucketList::kNumLevels; i++)
     {
         auto& level = currentBuckets[i];
         auto& prev = currentBuckets[i - 1];
 
         auto snap =
-            app.getBucketManager().getBucketByHash(hexToBin256(prev.snap));
+            app.getBucketManager().getLiveBucketByHash(hexToBin256(prev.snap));
         if (!level.next.isClear() &&
-            protocolVersionStartsFrom(Bucket::getBucketVersion(snap),
-                                      Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
+            protocolVersionStartsFrom(
+                snap->getBucketVersion(),
+                LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED))
         {
             level.next.clear();
         }
@@ -423,20 +425,20 @@ HistoryArchiveState::HistoryArchiveState() : server(STELLAR_CORE_VERSION)
     HistoryStateBucket b;
     b.curr = s;
     b.snap = s;
-    while (currentBuckets.size() < BucketList::kNumLevels)
+    while (currentBuckets.size() < LiveBucketList::kNumLevels)
     {
         currentBuckets.push_back(b);
     }
 }
 
 HistoryArchiveState::HistoryArchiveState(uint32_t ledgerSeq,
-                                         BucketList const& buckets,
+                                         LiveBucketList const& buckets,
                                          std::string const& passphrase)
     : server(STELLAR_CORE_VERSION)
     , networkPassphrase(passphrase)
     , currentLedger(ledgerSeq)
 {
-    for (uint32_t i = 0; i < BucketList::kNumLevels; ++i)
+    for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i)
     {
         HistoryStateBucket b;
         auto& level = buckets.getLevel(i);
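
Several loops in `HistoryArchive.cpp` above iterate levels oldest-first with the `for (j = kNumLevels; j != 0; --j)` / `i = j - 1` idiom. That shape is deliberate, as this sketch shows (the `kNumLevels = 11` constant is an assumption here; the real one is `LiveBucketList::kNumLevels`):

```cpp
#include <cstdint>

// Assumed value; the real constant is LiveBucketList::kNumLevels.
constexpr uint32_t kNumLevels = 11;

template <typename Fn>
void
forEachLevelOldestFirst(Fn&& visit)
{
    // Counting j down from kNumLevels to 1 and visiting j - 1 walks levels
    // oldest-first (kNumLevels - 1 down to 0) without the unsigned
    // underflow a naive `for (uint32_t i = kNumLevels - 1; i >= 0; --i)`
    // loop would hit: i >= 0 is always true for unsigned i.
    for (uint32_t j = kNumLevels; j != 0; --j)
    {
        visit(j - 1);
    }
}
```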
diff --git a/src/history/HistoryArchive.h b/src/history/HistoryArchive.h
index 1a70622dac..378716118d 100644
--- a/src/history/HistoryArchive.h
+++ b/src/history/HistoryArchive.h
@@ -27,13 +27,15 @@ namespace stellar
 {
 
 class Application;
-class BucketList;
+class LiveBucketList;
 class Bucket;
 
 struct HistoryStateBucket
 {
     std::string curr;
-    FutureBucket next;
+
+    // TODO: Add archival buckets to history
+    FutureBucket<LiveBucket> next;
     std::string snap;
 
     template <class Archive>
@@ -70,7 +72,7 @@ struct HistoryArchiveState
 
     HistoryArchiveState();
 
-    HistoryArchiveState(uint32_t ledgerSeq, BucketList const& buckets,
+    HistoryArchiveState(uint32_t ledgerSeq, LiveBucketList const& buckets,
                         std::string const& networkPassphrase);
 
     static std::string baseName();
diff --git a/src/history/HistoryManager.h b/src/history/HistoryManager.h
index d69f1c1bd8..732b1e1795 100644
--- a/src/history/HistoryManager.h
+++ b/src/history/HistoryManager.h
@@ -180,7 +180,7 @@ namespace stellar
 {
 class Application;
 class Bucket;
-class BucketList;
+class LiveBucketList;
 class Config;
 class Database;
 class HistoryArchive;
diff --git a/src/history/HistoryManagerImpl.cpp b/src/history/HistoryManagerImpl.cpp
index 6eb8a257a3..57584aea8e 100644
--- a/src/history/HistoryManagerImpl.cpp
+++ b/src/history/HistoryManagerImpl.cpp
@@ -221,10 +221,10 @@ HistoryManagerImpl::queueCurrentHistory()
     ZoneScoped;
     auto ledger = mApp.getLedgerManager().getLastClosedLedgerNum();
 
-    BucketList bl;
+    LiveBucketList bl;
     if (mApp.getConfig().MODE_ENABLES_BUCKETLIST)
     {
-        bl = mApp.getBucketManager().getBucketList();
+        bl = mApp.getBucketManager().getLiveBucketList();
     }
 
     HistoryArchiveState has(ledger, bl, mApp.getConfig().NETWORK_PASSPHRASE);
diff --git a/src/history/StateSnapshot.cpp b/src/history/StateSnapshot.cpp
index 6c03902a2c..d8de6ddd51 100644
--- a/src/history/StateSnapshot.cpp
+++ b/src/history/StateSnapshot.cpp
@@ -41,7 +41,7 @@ StateSnapshot::StateSnapshot(Application& app, HistoryArchiveState const& state)
           mSnapDir, HISTORY_FILE_TYPE_SCP, mLocalState.currentLedger))
 
 {
-    if (mLocalState.currentBuckets.size() != BucketList::kNumLevels)
+    if (mLocalState.currentBuckets.size() != LiveBucketList::kNumLevels)
     {
         throw std::runtime_error("Invalid HAS: malformed bucketlist");
     }
@@ -147,7 +147,7 @@ StateSnapshot::differingHASFiles(HistoryArchiveState const& other)
 
     for (auto const& hash : mLocalState.differingBuckets(other))
     {
-        auto b = mApp.getBucketManager().getBucketByHash(hexToBin256(hash));
+        auto b = mApp.getBucketManager().getLiveBucketByHash(hexToBin256(hash));
         releaseAssert(b);
         addIfExists(std::make_shared<FileTransferInfo>(*b));
     }
diff --git a/src/history/test/HistoryTests.cpp b/src/history/test/HistoryTests.cpp
index 2b882a9099..244270b7d5 100644
--- a/src/history/test/HistoryTests.cpp
+++ b/src/history/test/HistoryTests.cpp
@@ -146,7 +146,7 @@ TEST_CASE("History bucket verification", "[history][catchup]")
                   cg->getArchiveDirName())};
     std::vector<std::string> hashes;
     auto& wm = app->getWorkScheduler();
-    std::map<std::string, std::shared_ptr<Bucket>> mBuckets;
+    std::map<std::string, std::shared_ptr<LiveBucket>> mBuckets;
     auto tmpDir =
         std::make_unique<TmpDir>(app->getTmpDirManager().tmpDir("bucket-test"));
 
@@ -535,7 +535,7 @@ TEST_CASE("Publish works correctly post shadow removal", "[history]")
         // Perform publish: 2 checkpoints (or 127 ledgers) correspond to 3
         // levels being initialized and partially filled in the bucketlist
         sim.setUpgradeLedger(upgradeLedger,
-                             Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED);
+                             LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED);
         auto checkpointLedger = sim.getLastCheckpointLedger(2);
         auto maxLevelTouched = 3;
         sim.ensureOfflineCatchupPossible(checkpointLedger);
@@ -554,7 +554,7 @@ TEST_CASE("Publish works correctly post shadow removal", "[history]")
                                         configurator};
 
     uint32_t oldProto =
-        static_cast<uint32_t>(Bucket::FIRST_PROTOCOL_SHADOWS_REMOVED) - 1;
+        static_cast<uint32_t>(LiveBucket::FIRST_PROTOCOL_SHADOWS_REMOVED) - 1;
     catchupSimulation.generateRandomLedger(oldProto);
 
     // The next sections reflect how future buckets in HAS change, depending on
@@ -613,10 +613,8 @@ dbModeName(Config::TestDbMode mode)
 {
     switch (mode)
     {
-    case Config::TESTDB_IN_MEMORY_OFFERS:
-        return "TESTDB_IN_MEMORY_OFFERS";
-    case Config::TESTDB_ON_DISK_SQLITE:
-        return "TESTDB_ON_DISK_SQLITE";
+    case Config::TESTDB_IN_MEMORY:
+        return "TESTDB_IN_MEMORY";
 #ifdef USE_POSTGRES
     case Config::TESTDB_POSTGRESQL:
         return "TESTDB_POSTGRESQL";
@@ -749,7 +747,7 @@ TEST_CASE("History catchup with different modes",
                                     60};
 
     std::vector<Config::TestDbMode> dbModes = {
-        Config::TESTDB_ON_DISK_SQLITE, Config::TESTDB_BUCKET_DB_PERSISTENT};
+        Config::TESTDB_BUCKET_DB_PERSISTENT};
 #ifdef USE_POSTGRES
     if (!force_sqlite)
         dbModes.push_back(Config::TESTDB_POSTGRESQL);
@@ -1031,7 +1029,7 @@ TEST_CASE("Catchup non-initentry buckets to initentry-supporting works",
           "[history][bucket][acceptance]")
 {
     uint32_t newProto = static_cast<uint32_t>(
-        Bucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY);
+        LiveBucket::FIRST_PROTOCOL_SUPPORTING_INITENTRY_AND_METAENTRY);
     uint32_t oldProto = newProto - 1;
     auto configurator =
         std::make_shared<RealGenesisTmpDirHistoryConfigurator>();
@@ -1205,14 +1203,14 @@ TEST_CASE_VERSIONS(
             Application::pointer app = createTestApplication(clock, cfg);
             auto& hm = app->getHistoryManager();
             auto& lm = app->getLedgerManager();
-            auto& bl = app->getBucketManager().getBucketList();
+            auto& bl = app->getBucketManager().getLiveBucketList();
 
             while (hm.getPublishQueueCount() != 1)
             {
                 auto lcl = lm.getLastClosedLedgerHeader();
                 lcl.header.ledgerSeq += 1;
-                BucketTestUtils::addBatchAndUpdateSnapshot(
-                    bl, *app, lcl.header, {},
+                BucketTestUtils::addLiveBatchAndUpdateSnapshot(
+                    *app, lcl.header, {},
                     LedgerTestUtils::generateValidUniqueLedgerEntries(8), {});
                 clock.crank(true);
             }
@@ -1230,7 +1228,7 @@ TEST_CASE_VERSIONS(
 
             // Second, ensure `next` is in the exact same state as when it was
             // queued
-            for (uint32_t i = 0; i < BucketList::kNumLevels; i++)
+            for (uint32_t i = 0; i < LiveBucketList::kNumLevels; i++)
             {
                 auto const& currentNext = bl.getLevel(i).getNext();
                 auto const& queuedNext = queuedHAS.currentBuckets[i].next;
diff --git a/src/history/test/HistoryTestsUtils.cpp b/src/history/test/HistoryTestsUtils.cpp
index 5119d372a4..ab47d2973e 100644
--- a/src/history/test/HistoryTestsUtils.cpp
+++ b/src/history/test/HistoryTestsUtils.cpp
@@ -139,7 +139,7 @@ BucketOutputIteratorForTesting::writeTmpTestBucket()
     auto ledgerEntries =
         LedgerTestUtils::generateValidUniqueLedgerEntries(NUM_ITEMS_PER_BUCKET);
     auto bucketEntries =
-        Bucket::convertToBucketEntry(false, {}, ledgerEntries, {});
+        LiveBucket::convertToBucketEntry(false, {}, ledgerEntries, {});
     for (auto const& bucketEntry : bucketEntries)
     {
         put(bucketEntry);
@@ -543,12 +543,12 @@ CatchupSimulation::generateRandomLedger(uint32_t version)
     mLedgerHashes.push_back(lclh.hash);
     mBucketListHashes.push_back(lclh.header.bucketListHash);
     mBucket0Hashes.push_back(mApp.getBucketManager()
-                                 .getBucketList()
+                                 .getLiveBucketList()
                                  .getLevel(0)
                                  .getCurr()
                                  ->getHash());
     mBucket1Hashes.push_back(mApp.getBucketManager()
-                                 .getBucketList()
+                                 .getLiveBucketList()
                                  .getLevel(2)
                                  .getCurr()
                                  ->getHash());
@@ -600,7 +600,7 @@ CatchupSimulation::ensureLedgerAvailable(uint32_t targetLedger)
         if (hm.publishCheckpointOnLedgerClose(lcl))
         {
             mBucketListAtLastPublish =
-                getApp().getBucketManager().getBucketList();
+                getApp().getBucketManager().getLiveBucketList();
         }
     }
 }
@@ -950,12 +950,12 @@ CatchupSimulation::validateCatchup(Application::pointer app)
     auto haveBucketListHash =
         lm.getLastClosedLedgerHeader().header.bucketListHash;
     auto haveBucket0Hash = app->getBucketManager()
-                               .getBucketList()
+                               .getLiveBucketList()
                                .getLevel(0)
                                .getCurr()
                                ->getHash();
     auto haveBucket1Hash = app->getBucketManager()
-                               .getBucketList()
+                               .getLiveBucketList()
                                .getLevel(2)
                                .getCurr()
                                ->getHash();
@@ -986,8 +986,8 @@ CatchupSimulation::validateCatchup(Application::pointer app)
     CHECK(wantBucketListHash == haveBucketListHash);
     CHECK(wantHash == haveHash);
 
-    CHECK(app->getBucketManager().getBucketByHash(wantBucket0Hash));
-    CHECK(app->getBucketManager().getBucketByHash(wantBucket1Hash));
+    CHECK(app->getBucketManager().getLiveBucketByHash(wantBucket0Hash));
+    CHECK(app->getBucketManager().getLiveBucketByHash(wantBucket1Hash));
     CHECK(wantBucket0Hash == haveBucket0Hash);
     CHECK(wantBucket1Hash == haveBucket1Hash);
 
diff --git a/src/history/test/HistoryTestsUtils.h b/src/history/test/HistoryTestsUtils.h
index eace1f3a6e..8c886c152a 100644
--- a/src/history/test/HistoryTestsUtils.h
+++ b/src/history/test/HistoryTestsUtils.h
@@ -98,7 +98,7 @@ class RealGenesisTmpDirHistoryConfigurator : public TmpDirHistoryConfigurator
     Config& configure(Config& cfg, bool writable) const override;
 };
 
-class BucketOutputIteratorForTesting : public BucketOutputIterator
+class BucketOutputIteratorForTesting : public LiveBucketOutputIterator
 {
     const size_t NUM_ITEMS_PER_BUCKET = 5;
 
@@ -185,7 +185,7 @@ class CatchupSimulation
     std::vector<Config> mCfgs;
     Application::pointer mAppPtr;
     Application& mApp;
-    BucketList mBucketListAtLastPublish;
+    LiveBucketList mBucketListAtLastPublish;
 
     std::vector<LedgerCloseData> mLedgerCloseDatas;
 
diff --git a/src/historywork/DownloadBucketsWork.cpp b/src/historywork/DownloadBucketsWork.cpp
index 2dcea7ba61..2606a695ae 100644
--- a/src/historywork/DownloadBucketsWork.cpp
+++ b/src/historywork/DownloadBucketsWork.cpp
@@ -17,7 +17,8 @@ namespace stellar
 {
 
 DownloadBucketsWork::DownloadBucketsWork(
-    Application& app, std::map<std::string, std::shared_ptr<Bucket>>& buckets,
+    Application& app,
+    std::map<std::string, std::shared_ptr<LiveBucket>>& buckets,
     std::vector<std::string> hashes, TmpDir const& downloadDir,
     std::shared_ptr<HistoryArchive> archive)
     : BatchWork{app, "download-verify-buckets"}
@@ -94,7 +95,7 @@ DownloadBucketsWork::yieldMoreWork()
         if (self)
         {
             auto bucketPath = ft.localPath_nogz();
-            auto b = app.getBucketManager().adoptFileAsBucket(
+            auto b = app.getBucketManager().adoptFileAsLiveBucket(
                 bucketPath, hexToBin256(hash),
                 /*mergeKey=*/nullptr,
                 /*index=*/nullptr);
diff --git a/src/historywork/DownloadBucketsWork.h b/src/historywork/DownloadBucketsWork.h
index b55942eeb3..52db6cd968 100644
--- a/src/historywork/DownloadBucketsWork.h
+++ b/src/historywork/DownloadBucketsWork.h
@@ -17,18 +17,18 @@ class HistoryArchive;
 
 class DownloadBucketsWork : public BatchWork
 {
-    std::map<std::string, std::shared_ptr<Bucket>>& mBuckets;
+    std::map<std::string, std::shared_ptr<LiveBucket>>& mBuckets;
     std::vector<std::string> mHashes;
     std::vector<std::string>::const_iterator mNextBucketIter;
     TmpDir const& mDownloadDir;
     std::shared_ptr<HistoryArchive> mArchive;
 
   public:
-    DownloadBucketsWork(Application& app,
-                        std::map<std::string, std::shared_ptr<Bucket>>& buckets,
-                        std::vector<std::string> hashes,
-                        TmpDir const& downloadDir,
-                        std::shared_ptr<HistoryArchive> archive = nullptr);
+    DownloadBucketsWork(
+        Application& app,
+        std::map<std::string, std::shared_ptr<LiveBucket>>& buckets,
+        std::vector<std::string> hashes, TmpDir const& downloadDir,
+        std::shared_ptr<HistoryArchive> archive = nullptr);
     ~DownloadBucketsWork() = default;
     std::string getStatus() const override;
 
diff --git a/src/invariant/BucketListIsConsistentWithDatabase.cpp b/src/invariant/BucketListIsConsistentWithDatabase.cpp
index e12da7b724..dfd00554c3 100644
--- a/src/invariant/BucketListIsConsistentWithDatabase.cpp
+++ b/src/invariant/BucketListIsConsistentWithDatabase.cpp
@@ -17,6 +17,7 @@
 #include "main/Application.h"
 #include "main/PersistentState.h"
 #include "medida/timer.h"
+#include "util/GlobalChecks.h"
 #include "util/XDRCereal.h"
 #include <chrono>
 #include <fmt/chrono.h>
@@ -26,7 +27,9 @@
 namespace stellar
 {
 
-static std::string
+namespace
+{
+std::string
 checkAgainstDatabase(AbstractLedgerTxn& ltx, LedgerEntry const& entry)
 {
     auto fromDb = ltx.loadWithoutRecord(LedgerEntryKey(entry));
@@ -51,7 +54,7 @@ checkAgainstDatabase(AbstractLedgerTxn& ltx, LedgerEntry const& entry)
     }
 }
 
-static std::string
+std::string
 checkAgainstDatabase(AbstractLedgerTxn& ltx, LedgerKey const& key)
 {
     auto fromDb = ltx.loadWithoutRecord(key);
@@ -65,6 +68,25 @@ checkAgainstDatabase(AbstractLedgerTxn& ltx, LedgerKey const& key)
     return s;
 }
 
+std::string
+checkDbEntryCounts(Application& app, LedgerRange const& range,
+                   uint64_t expectedOfferCount)
+{
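+    // With BucketListDB, offers are the only entry type still stored in SQL,
+    // so consistency checks only need to reconcile the offer count.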
+    std::string msg;
+    auto& ltxRoot = app.getLedgerTxnRoot();
+    uint64_t numInDb = ltxRoot.countOffers(range);
+    if (numInDb != expectedOfferCount)
+    {
+        msg = fmt::format(
+            FMT_STRING("Incorrect OFFER count: Bucket = {:d} Database "
+                       "= {:d}"),
+            expectedOfferCount, numInDb);
+    }
+
+    return msg;
+}
+}
+
 std::shared_ptr<Invariant>
 BucketListIsConsistentWithDatabase::registerInvariant(Application& app)
 {
@@ -84,103 +106,6 @@ BucketListIsConsistentWithDatabase::getName() const
     return "BucketListIsConsistentWithDatabase";
 }
 
-struct EntryCounts
-{
-    uint64_t mAccounts{0};
-    uint64_t mTrustLines{0};
-    uint64_t mOffers{0};
-    uint64_t mData{0};
-    uint64_t mClaimableBalance{0};
-    uint64_t mLiquidityPool{0};
-    uint64_t mContractData{0};
-    uint64_t mContractCode{0};
-    uint64_t mConfigSettings{0};
-    uint64_t mTTL{0};
-
-    uint64_t
-    totalEntries() const
-    {
-        return mAccounts + mTrustLines + mOffers + mData + mClaimableBalance +
-               mLiquidityPool + mContractData + mConfigSettings + mTTL;
-    }
-
-    void
-    countLiveEntry(LedgerEntry const& e)
-    {
-        switch (e.data.type())
-        {
-        case ACCOUNT:
-            ++mAccounts;
-            break;
-        case TRUSTLINE:
-            ++mTrustLines;
-            break;
-        case OFFER:
-            ++mOffers;
-            break;
-        case DATA:
-            ++mData;
-            break;
-        case CLAIMABLE_BALANCE:
-            ++mClaimableBalance;
-            break;
-        case LIQUIDITY_POOL:
-            ++mLiquidityPool;
-            break;
-        case CONTRACT_DATA:
-            ++mContractData;
-            break;
-        case CONTRACT_CODE:
-            ++mContractCode;
-            break;
-        case CONFIG_SETTING:
-            ++mConfigSettings;
-            break;
-        case TTL:
-            ++mTTL;
-            break;
-        default:
-            throw std::runtime_error(
-                fmt::format(FMT_STRING("unknown ledger entry type: {:d}"),
-                            static_cast<uint32_t>(e.data.type())));
-        }
-    }
-
-    std::string
-    checkDbEntryCounts(Application& app, LedgerRange const& range,
-                       std::function<bool(LedgerEntryType)> entryTypeFilter)
-    {
-        std::string msg;
-        auto check = [&](LedgerEntryType let, uint64_t numInBucket) {
-            if (entryTypeFilter(let))
-            {
-                auto& ltxRoot = app.getLedgerTxnRoot();
-                uint64_t numInDb = ltxRoot.countObjects(let, range);
-                if (numInDb != numInBucket)
-                {
-                    msg = fmt::format(
-                        FMT_STRING("Incorrect {} count: Bucket = {:d} Database "
-                                   "= {:d}"),
-                        xdr::xdr_traits<LedgerEntryType>::enum_name(let),
-                        numInBucket, numInDb);
-                    return false;
-                }
-            }
-            return true;
-        };
-
-        // Uses short-circuiting to make this compact
-        check(ACCOUNT, mAccounts) && check(TRUSTLINE, mTrustLines) &&
-            check(OFFER, mOffers) && check(DATA, mData) &&
-            check(CLAIMABLE_BALANCE, mClaimableBalance) &&
-            check(LIQUIDITY_POOL, mLiquidityPool) &&
-            check(CONTRACT_DATA, mContractData) &&
-            check(CONTRACT_CODE, mContractCode) &&
-            check(CONFIG_SETTING, mConfigSettings) && check(TTL, mTTL);
-        return msg;
-    }
-};
-
 void
 BucketListIsConsistentWithDatabase::checkEntireBucketlist()
 {
@@ -189,29 +114,29 @@ BucketListIsConsistentWithDatabase::checkEntireBucketlist()
     HistoryArchiveState has = lm.getLastClosedLedgerHAS();
     std::map<LedgerKey, LedgerEntry> bucketLedgerMap =
         bm.loadCompleteLedgerState(has);
-    EntryCounts counts;
+    uint64_t offerCount = 0;
     medida::Timer timer(std::chrono::microseconds(1));
 
     {
         LedgerTxn ltx(mApp.getLedgerTxnRoot());
         for (auto const& pair : bucketLedgerMap)
         {
-            // Don't check entry types in BucketListDB when enabled
-            if (mApp.getConfig().isUsingBucketListDB() &&
-                !BucketIndex::typeNotSupported(pair.first.type()))
+            // Don't check entry types supported by BucketListDB, since they
+            // won't exist in SQL
+            if (!BucketIndex::typeNotSupported(pair.first.type()))
             {
                 continue;
             }
 
-            counts.countLiveEntry(pair.second);
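+            // Offers are the only type BucketListDB does not support, so
+            // every entry that reaches this point is an offer.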
+            ++offerCount;
             std::string s;
             timer.Time([&]() { s = checkAgainstDatabase(ltx, pair.second); });
             if (!s.empty())
             {
                 throw std::runtime_error(s);
             }
-            auto i = counts.totalEntries();
-            if ((i & 0x7ffff) == 0)
+
+            if ((offerCount & 0x7ffff) == 0)
             {
                 using namespace std::chrono;
                 nanoseconds ns = timer.duration_unit() *
@@ -220,60 +145,37 @@ BucketListIsConsistentWithDatabase::checkEntireBucketlist()
                 CLOG_INFO(Ledger,
                           "Checked bucket-vs-DB consistency for "
                           "{} entries (mean {}/entry)",
-                          i, us);
+                          offerCount, us);
             }
         }
     }
 
-    // Count functionality does not support in-memory LedgerTxn
-    if (!mApp.getConfig().isInMemoryMode())
-    {
-        auto range = LedgerRange::inclusive(LedgerManager::GENESIS_LEDGER_SEQ,
-                                            has.currentLedger);
+    auto range = LedgerRange::inclusive(LedgerManager::GENESIS_LEDGER_SEQ,
+                                        has.currentLedger);
 
-        // If BucketListDB enabled, only types not supported by BucketListDB
-        // should be in SQL DB
-        std::function<bool(LedgerEntryType)> filter;
-        if (mApp.getConfig().isUsingBucketListDB())
-        {
-            filter = BucketIndex::typeNotSupported;
-        }
-        else
-        {
-            filter = [](LedgerEntryType) { return true; };
-        }
-
-        auto s = counts.checkDbEntryCounts(mApp, range, filter);
-        if (!s.empty())
-        {
-            throw std::runtime_error(s);
-        }
+    auto s = checkDbEntryCounts(mApp, range, offerCount);
+    if (!s.empty())
+    {
+        throw std::runtime_error(s);
     }
 
-    if (mApp.getConfig().isUsingBucketListDB() &&
-        mApp.getPersistentState().getState(PersistentState::kDBBackend) !=
-            BucketIndex::DB_BACKEND_STATE)
+    if (mApp.getPersistentState().getState(PersistentState::kDBBackend) !=
+        BucketIndex::DB_BACKEND_STATE)
     {
-        throw std::runtime_error("BucketListDB enabled but BucketListDB flag "
-                                 "not set in PersistentState.");
+        throw std::runtime_error(
+            "Corrupt DB: BucketListDB flag "
+            "not set in PersistentState. Please run new-db or upgrade-db");
     }
 }
 
 std::string
 BucketListIsConsistentWithDatabase::checkAfterAssumeState(uint32_t newestLedger)
 {
-    // If BucketListDB is disabled, we've already enforced the invariant on a
-    // per-Bucket level
-    if (!mApp.getConfig().isUsingBucketListDB())
-    {
-        return {};
-    }
-
-    EntryCounts counts;
+    uint64_t offerCount = 0;
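+    // Each unique offer key is counted once; newer buckets shadow older ones.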
     LedgerKeySet seenKeys;
 
     auto perBucketCheck = [&](auto bucket, auto& ltx) {
-        for (BucketInputIterator iter(bucket); iter; ++iter)
+        for (LiveBucketInputIterator iter(bucket); iter; ++iter)
         {
             auto const& e = *iter;
 
@@ -290,8 +192,7 @@ BucketListIsConsistentWithDatabase::checkAfterAssumeState(uint32_t newestLedger)
                 auto [_, newKey] = seenKeys.emplace(key);
                 if (newKey)
                 {
-                    counts.countLiveEntry(e.liveEntry());
-
+                    ++offerCount;
                     auto s = checkAgainstDatabase(ltx, e.liveEntry());
                     if (!s.empty())
                     {
@@ -325,9 +226,9 @@ BucketListIsConsistentWithDatabase::checkAfterAssumeState(uint32_t newestLedger)
 
     {
         LedgerTxn ltx(mApp.getLedgerTxnRoot());
-        auto& bl = mApp.getBucketManager().getBucketList();
+        auto& bl = mApp.getBucketManager().getLiveBucketList();
 
-        for (uint32_t i = 0; i < BucketList::kNumLevels; ++i)
+        for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i)
         {
             auto const& level = bl.getLevel(i);
             for (auto const& bucket : {level.getCurr(), level.getSnap()})
@@ -344,26 +245,25 @@ BucketListIsConsistentWithDatabase::checkAfterAssumeState(uint32_t newestLedger)
     auto range =
         LedgerRange::inclusive(LedgerManager::GENESIS_LEDGER_SEQ, newestLedger);
 
-    // SQL only stores offers when BucketListDB is enabled
-    return counts.checkDbEntryCounts(
-        mApp, range, [](LedgerEntryType let) { return let == OFFER; });
+    return checkDbEntryCounts(mApp, range, offerCount);
 }
 
 std::string
 BucketListIsConsistentWithDatabase::checkOnBucketApply(
-    std::shared_ptr<Bucket const> bucket, uint32_t oldestLedger,
-    uint32_t newestLedger, std::function<bool(LedgerEntryType)> entryTypeFilter)
+    std::shared_ptr<LiveBucket const> bucket, uint32_t oldestLedger,
+    uint32_t newestLedger, std::unordered_set<LedgerKey> const& shadowedKeys)
 {
-    EntryCounts counts;
+    uint64_t offerCount = 0;
     {
         LedgerTxn ltx(mApp.getLedgerTxnRoot());
 
         bool hasPreviousEntry = false;
         BucketEntry previousEntry;
-        for (BucketInputIterator iter(bucket); iter; ++iter)
+        for (LiveBucketInputIterator iter(bucket); iter; ++iter)
         {
             auto const& e = *iter;
-            if (hasPreviousEntry && !BucketEntryIdCmp{}(previousEntry, e))
+            if (hasPreviousEntry &&
+                !BucketEntryIdCmp<LiveBucket>{}(previousEntry, e))
             {
                 std::string s = "Bucket has out of order entries: ";
                 s += xdrToCerealString(previousEntry, "previous");
@@ -394,28 +294,25 @@ BucketListIsConsistentWithDatabase::checkOnBucketApply(
                     return s;
                 }
 
-                if (entryTypeFilter(e.liveEntry().data.type()))
+                // Don't check DB against keys shadowed by earlier Buckets
+                if (BucketIndex::typeNotSupported(e.liveEntry().data.type()) &&
+                    shadowedKeys.find(LedgerEntryKey(e.liveEntry())) ==
+                        shadowedKeys.end())
                 {
-                    counts.countLiveEntry(e.liveEntry());
-
-                    // BucketListDB is not compatible with per-Bucket database
-                    // consistency checks
-                    if (!mApp.getConfig().isUsingBucketListDB())
+                    ++offerCount;
+                    auto s = checkAgainstDatabase(ltx, e.liveEntry());
+                    if (!s.empty())
                     {
-                        auto s = checkAgainstDatabase(ltx, e.liveEntry());
-                        if (!s.empty())
-                        {
-                            return s;
-                        }
+                        return s;
                     }
                 }
             }
-            else if (e.type() == DEADENTRY)
+            else
             {
-                // BucketListDB is not compatible with per-Bucket database
-                // consistency checks
-                if (entryTypeFilter(e.deadEntry().type()) &&
-                    !mApp.getConfig().isUsingBucketListDB())
+                // Only check OFFER keys that are not shadowed by an earlier
+                // bucket
+                if (BucketIndex::typeNotSupported(e.deadEntry().type()) &&
+                    shadowedKeys.find(e.deadEntry()) == shadowedKeys.end())
                 {
                     auto s = checkAgainstDatabase(ltx, e.deadEntry());
                     if (!s.empty())
@@ -428,13 +325,6 @@ BucketListIsConsistentWithDatabase::checkOnBucketApply(
     }
 
     auto range = LedgerRange::inclusive(oldestLedger, newestLedger);
-
-    // BucketListDB not compatible with per-Bucket database consistency checks
-    if (!mApp.getConfig().isUsingBucketListDB())
-    {
-        return counts.checkDbEntryCounts(mApp, range, entryTypeFilter);
-    }
-
-    return std::string{};
+    return checkDbEntryCounts(mApp, range, offerCount);
 }
 }
diff --git a/src/invariant/BucketListIsConsistentWithDatabase.h b/src/invariant/BucketListIsConsistentWithDatabase.h
index b98253dbc9..a9bb3003ac 100644
--- a/src/invariant/BucketListIsConsistentWithDatabase.h
+++ b/src/invariant/BucketListIsConsistentWithDatabase.h
@@ -34,9 +34,9 @@ class BucketListIsConsistentWithDatabase : public Invariant
     virtual std::string getName() const override;
 
     virtual std::string checkOnBucketApply(
-        std::shared_ptr<Bucket const> bucket, uint32_t oldestLedger,
+        std::shared_ptr<LiveBucket const> bucket, uint32_t oldestLedger,
         uint32_t newestLedger,
-        std::function<bool(LedgerEntryType)> entryTypeFilter) override;
+        std::unordered_set<LedgerKey> const& shadowedKeys) override;
 
     virtual std::string checkAfterAssumeState(uint32_t newestLedger) override;
 
diff --git a/src/invariant/Invariant.h b/src/invariant/Invariant.h
index ddb235795d..6a90105477 100644
--- a/src/invariant/Invariant.h
+++ b/src/invariant/Invariant.h
@@ -8,15 +8,17 @@
 #include <functional>
 #include <memory>
 #include <string>
+#include <unordered_set>
 
 namespace stellar
 {
 
-class Bucket;
+class LiveBucket;
 enum LedgerEntryType : std::int32_t;
 struct LedgerTxnDelta;
 struct Operation;
 struct OperationResult;
+struct LedgerKey;
 
 // NOTE: The checkOn* functions should have a default implementation so that
 //       more can be added in the future without requiring changes to all
@@ -43,9 +45,9 @@ class Invariant
     }
 
     virtual std::string
-    checkOnBucketApply(std::shared_ptr<Bucket const> bucket,
+    checkOnBucketApply(std::shared_ptr<LiveBucket const> bucket,
                        uint32_t oldestLedger, uint32_t newestLedger,
-                       std::function<bool(LedgerEntryType)> entryTypeFilter)
+                       std::unordered_set<LedgerKey> const& shadowedKeys)
     {
         return std::string{};
     }
diff --git a/src/invariant/InvariantManager.h b/src/invariant/InvariantManager.h
index 361afc150a..61575fcd49 100644
--- a/src/invariant/InvariantManager.h
+++ b/src/invariant/InvariantManager.h
@@ -35,10 +35,12 @@ class InvariantManager
 
     virtual Json::Value getJsonInfo() = 0;
     virtual std::vector<std::string> getEnabledInvariants() const = 0;
+    virtual bool isBucketApplyInvariantEnabled() const = 0;
 
-    virtual void checkOnBucketApply(
-        std::shared_ptr<Bucket const> bucket, uint32_t ledger, uint32_t level,
-        bool isCurr, std::function<bool(LedgerEntryType)> entryTypeFilter) = 0;
+    virtual void
+    checkOnBucketApply(std::shared_ptr<LiveBucket const> bucket,
+                       uint32_t ledger, uint32_t level, bool isCurr,
+                       std::unordered_set<LedgerKey> const& shadowedKeys) = 0;
 
     virtual void checkAfterAssumeState(uint32_t newestLedger) = 0;
 
diff --git a/src/invariant/InvariantManagerImpl.cpp b/src/invariant/InvariantManagerImpl.cpp
index df0ca6f61a..c0da64a78b 100644
--- a/src/invariant/InvariantManagerImpl.cpp
+++ b/src/invariant/InvariantManagerImpl.cpp
@@ -69,21 +69,30 @@ InvariantManagerImpl::getEnabledInvariants() const
     return res;
 }
 
+bool
+InvariantManagerImpl::isBucketApplyInvariantEnabled() const
+{
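+    // The invariant is identified by name in the list of enabled invariants.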
+    return std::any_of(mEnabled.begin(), mEnabled.end(), [](auto const& inv) {
+        return inv->getName() == "BucketListIsConsistentWithDatabase";
+    });
+}
+
 void
 InvariantManagerImpl::checkOnBucketApply(
-    std::shared_ptr<Bucket const> bucket, uint32_t ledger, uint32_t level,
-    bool isCurr, std::function<bool(LedgerEntryType)> entryTypeFilter)
+    std::shared_ptr<LiveBucket const> bucket, uint32_t ledger, uint32_t level,
+    bool isCurr, std::unordered_set<LedgerKey> const& shadowedKeys)
 {
-    uint32_t oldestLedger = isCurr
-                                ? BucketList::oldestLedgerInCurr(ledger, level)
-                                : BucketList::oldestLedgerInSnap(ledger, level);
-    uint32_t newestLedger = oldestLedger - 1 +
-                            (isCurr ? BucketList::sizeOfCurr(ledger, level)
-                                    : BucketList::sizeOfSnap(ledger, level));
+    uint32_t oldestLedger =
+        isCurr ? LiveBucketList::oldestLedgerInCurr(ledger, level)
+               : LiveBucketList::oldestLedgerInSnap(ledger, level);
+    uint32_t newestLedger =
+        oldestLedger - 1 +
+        (isCurr ? LiveBucketList::sizeOfCurr(ledger, level)
+                : LiveBucketList::sizeOfSnap(ledger, level));
     for (auto invariant : mEnabled)
     {
-        auto result = invariant->checkOnBucketApply(
-            bucket, oldestLedger, newestLedger, entryTypeFilter);
+        auto result = invariant->checkOnBucketApply(bucket, oldestLedger,
+                                                    newestLedger, shadowedKeys);
         if (result.empty())
         {
             continue;
diff --git a/src/invariant/InvariantManagerImpl.h b/src/invariant/InvariantManagerImpl.h
index 5e495bcf3c..fbbb35fee8 100644
--- a/src/invariant/InvariantManagerImpl.h
+++ b/src/invariant/InvariantManagerImpl.h
@@ -36,15 +36,16 @@ class InvariantManagerImpl : public InvariantManager
     virtual Json::Value getJsonInfo() override;
 
     virtual std::vector<std::string> getEnabledInvariants() const override;
+    bool isBucketApplyInvariantEnabled() const override;
 
     virtual void checkOnOperationApply(Operation const& operation,
                                        OperationResult const& opres,
                                        LedgerTxnDelta const& ltxDelta) override;
 
     virtual void checkOnBucketApply(
-        std::shared_ptr<Bucket const> bucket, uint32_t ledger, uint32_t level,
-        bool isCurr,
-        std::function<bool(LedgerEntryType)> entryTypeFilter) override;
+        std::shared_ptr<LiveBucket const> bucket, uint32_t ledger,
+        uint32_t level, bool isCurr,
+        std::unordered_set<LedgerKey> const& shadowedKeys) override;
 
     virtual void checkAfterAssumeState(uint32_t newestLedger) override;
 
diff --git a/src/invariant/test/AccountSubEntriesCountIsValidTests.cpp b/src/invariant/test/AccountSubEntriesCountIsValidTests.cpp
index acc308be6b..20892ad29e 100644
--- a/src/invariant/test/AccountSubEntriesCountIsValidTests.cpp
+++ b/src/invariant/test/AccountSubEntriesCountIsValidTests.cpp
@@ -292,7 +292,7 @@ deleteRandomSubEntryFromAccount(Application& app, LedgerEntry& le,
 TEST_CASE("Create account with no subentries",
           "[invariant][accountsubentriescount]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"AccountSubEntriesCountIsValid"};
     VirtualClock clock;
     Application::pointer app = createTestApplication(clock, cfg);
@@ -309,7 +309,7 @@ TEST_CASE("Create account then add signers and subentries",
           "[invariant][accountsubentriescount]")
 {
     stellar::uniform_int_distribution<int32_t> changesDist(-1, 2);
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"AccountSubEntriesCountIsValid"};
 
     for (uint32_t i = 0; i < 50; ++i)
diff --git a/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp b/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp
index 69edb0711b..0cd75275ad 100644
--- a/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp
+++ b/src/invariant/test/BucketListIsConsistentWithDatabaseTests.cpp
@@ -2,6 +2,7 @@
 // under the Apache License, Version 2.0. See the COPYING file at the root
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
 
+#include "bucket/Bucket.h"
 #include "bucket/BucketInputIterator.h"
 #include "bucket/BucketManager.h"
 #include "bucket/BucketOutputIterator.h"
@@ -19,6 +20,7 @@
 #include "test/test.h"
 #include "transactions/TransactionUtils.h"
 #include "util/Decoder.h"
+#include "util/GlobalChecks.h"
 #include "util/Math.h"
 #include "util/UnorderedSet.h"
 #include "util/XDROperators.h"
@@ -42,44 +44,10 @@ struct BucketListGenerator
   public:
     BucketListGenerator() : mLedgerSeq(1)
     {
-        auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+        auto cfg = getTestConfig();
         cfg.OVERRIDE_EVICTION_PARAMS_FOR_TESTING = true;
         cfg.TESTING_STARTING_EVICTION_SCAN_LEVEL = 1;
         mAppGenerate = createTestApplication(mClock, cfg);
-
-        auto skey = SecretKey::fromSeed(mAppGenerate->getNetworkID());
-        LedgerKey key(ACCOUNT);
-        key.account().accountID = skey.getPublicKey();
-        mLiveKeys.insert(key);
-
-        if (appProtocolVersionStartsFrom(*mAppGenerate,
-                                         SOROBAN_PROTOCOL_VERSION))
-        {
-            // All config settings entries will be created automatically during
-            // the protocol upgrade and NOT generated by tests, so they should
-            // be reflected in the live key set. This allows tests to still run
-            // on those entries.
-            for (auto t : xdr::xdr_traits<ConfigSettingID>::enum_values())
-            {
-#ifdef ENABLE_NEXT_PROTOCOL_VERSION_UNSAFE_FOR_PRODUCTION
-                // This setting has been introduced in the vnext xdr, but it's
-                // not used in code yet. This check can be replaced with a
-                // runtime protocol check once we create the setting in the
-                // upgrade path.
-                if (static_cast<ConfigSettingID>(t) ==
-                    ConfigSettingID::
-                        CONFIG_SETTING_CONTRACT_PARALLEL_COMPUTE_V0)
-                {
-                    continue;
-                }
-#endif
-                LedgerKey ckey(CONFIG_SETTING);
-                ckey.configSetting().configSettingID =
-                    static_cast<ConfigSettingID>(t);
-                mLiveKeys.insert(ckey);
-            }
-        }
-
         LedgerTxn ltx(mAppGenerate->getLedgerTxnRoot(), false);
         REQUIRE(mLedgerSeq == ltx.loadHeader().current().ledgerSeq);
     }
@@ -88,7 +56,7 @@ struct BucketListGenerator
     void
     applyBuckets(Application::pointer app, Args&&... args)
     {
-        std::map<std::string, std::shared_ptr<Bucket>> buckets;
+        std::map<std::string, std::shared_ptr<LiveBucket>> buckets;
         auto has = getHistoryArchiveState(app);
         auto& wm = app->getWorkScheduler();
         wm.executeWork<T>(buckets, has,
@@ -101,8 +69,8 @@ struct BucketListGenerator
     applyBuckets(Args&&... args)
     {
         VirtualClock clock;
-        Application::pointer app = createTestApplication(
-            clock, getTestConfig(1, Config::TESTDB_IN_MEMORY_OFFERS));
+        Application::pointer app =
+            createTestApplication(clock, getTestConfig(1));
         applyBuckets<T, Args...>(app, std::forward<Args>(args)...);
     }
 
@@ -144,9 +112,8 @@ struct BucketListGenerator
         std::vector<LedgerKey> deadEntries;
         auto header = ltx.loadHeader().current();
         ltx.getAllEntries(initEntries, liveEntries, deadEntries);
-        BucketTestUtils::addBatchAndUpdateSnapshot(
-            app->getBucketManager().getBucketList(), *app, header, initEntries,
-            liveEntries, deadEntries);
+        BucketTestUtils::addLiveBatchAndUpdateSnapshot(
+            *app, header, initEntries, liveEntries, deadEntries);
         ltx.commit();
     }
 
@@ -164,8 +131,8 @@ struct BucketListGenerator
     generateLiveEntries(AbstractLedgerTxn& ltx)
     {
         auto entries =
-            LedgerTestUtils::generateValidLedgerEntriesWithExclusions(
-                {CONFIG_SETTING}, 5);
+            LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes({OFFER},
+                                                                       5);
         for (auto& le : entries)
         {
             le.lastModifiedLedgerSeq = mLedgerSeq;
@@ -176,12 +143,7 @@ struct BucketListGenerator
     virtual std::vector<LedgerKey>
     generateDeadEntries(AbstractLedgerTxn& ltx)
     {
-        UnorderedSet<LedgerKey> liveDeletable(mLiveKeys.size());
-        std::copy_if(
-            mLiveKeys.begin(), mLiveKeys.end(),
-            std::inserter(liveDeletable, liveDeletable.end()),
-            [](LedgerKey const& key) { return key.type() != CONFIG_SETTING; });
-
+        UnorderedSet<LedgerKey> liveDeletable = mLiveKeys;
         std::vector<LedgerKey> dead;
         while (dead.size() < 2 && !liveDeletable.empty())
         {
@@ -205,28 +167,28 @@ struct BucketListGenerator
     HistoryArchiveState
     getHistoryArchiveState(Application::pointer app)
     {
-        auto& blGenerate = mAppGenerate->getBucketManager().getBucketList();
+        auto& blGenerate = mAppGenerate->getBucketManager().getLiveBucketList();
         auto& bmApply = app->getBucketManager();
         MergeCounters mergeCounters;
         LedgerTxn ltx(mAppGenerate->getLedgerTxnRoot(), false);
         auto vers = ltx.loadHeader().current().ledgerVersion;
-        for (uint32_t i = 0; i <= BucketList::kNumLevels - 1; i++)
+        for (uint32_t i = 0; i < LiveBucketList::kNumLevels; i++)
         {
             auto& level = blGenerate.getLevel(i);
             auto meta = testutil::testBucketMetadata(vers);
-            auto keepDead = BucketList::keepDeadEntries(i);
+            auto keepDead = LiveBucketList::keepTombstoneEntries(i);
 
             auto writeBucketFile = [&](auto b) {
-                BucketOutputIterator out(bmApply.getTmpDir(), keepDead, meta,
-                                         mergeCounters, mClock.getIOContext(),
-                                         /*doFsync=*/true);
-                for (BucketInputIterator in(b); in; ++in)
+                LiveBucketOutputIterator out(bmApply.getTmpDir(), keepDead,
+                                             meta, mergeCounters,
+                                             mClock.getIOContext(),
+                                             /*doFsync=*/true);
+                for (LiveBucketInputIterator in(b); in; ++in)
                 {
                     out.put(*in);
                 }
 
-                auto bucket =
-                    out.getBucket(bmApply, /*shouldSynchronouslyIndex=*/false);
+                auto bucket = out.getBucket(bmApply);
             };
             writeBucketFile(level.getCurr());
             writeBucketFile(level.getSnap());
@@ -246,9 +208,10 @@ struct BucketListGenerator
 };
 
 bool
-doesBucketContain(std::shared_ptr<Bucket const> bucket, const BucketEntry& be)
+doesBucketContain(std::shared_ptr<LiveBucket const> bucket,
+                  const BucketEntry& be)
 {
-    for (BucketInputIterator iter(bucket); iter; ++iter)
+    for (LiveBucketInputIterator iter(bucket); iter; ++iter)
     {
         if (*iter == be)
         {
@@ -259,9 +222,9 @@ doesBucketContain(std::shared_ptr<Bucket const> bucket, const BucketEntry& be)
 }
 
 bool
-doesBucketListContain(BucketList& bl, const BucketEntry& be)
+doesBucketListContain(LiveBucketList& bl, const BucketEntry& be)
 {
-    for (uint32_t i = 0; i < BucketList::kNumLevels; ++i)
+    for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i)
     {
         auto const& level = bl.getLevel(i);
         for (auto const& bucket : {level.getCurr(), level.getSnap()})
@@ -278,11 +241,10 @@ doesBucketListContain(BucketList& bl, const BucketEntry& be)
 struct SelectBucketListGenerator : public BucketListGenerator
 {
     uint32_t const mSelectLedger;
-    LedgerEntryType const mType;
     std::shared_ptr<LedgerEntry> mSelected;
 
-    SelectBucketListGenerator(uint32_t selectLedger, LedgerEntryType type)
-        : mSelectLedger(selectLedger), mType(type)
+    SelectBucketListGenerator(uint32_t selectLedger)
+        : mSelectLedger(selectLedger)
     {
     }
 
@@ -291,24 +253,35 @@ struct SelectBucketListGenerator : public BucketListGenerator
     {
         if (mLedgerSeq == mSelectLedger)
         {
-            UnorderedSet<LedgerKey> filteredKeys(mLiveKeys.size());
-            std::copy_if(
-                mLiveKeys.begin(), mLiveKeys.end(),
-                std::inserter(filteredKeys, filteredKeys.end()),
-                [this](LedgerKey const& key) { return key.type() == mType; });
-
-            if (!filteredKeys.empty())
+            if (!mLiveKeys.empty())
             {
                 stellar::uniform_int_distribution<size_t> dist(
-                    0, filteredKeys.size() - 1);
-                auto iter = filteredKeys.begin();
+                    0, mLiveKeys.size() - 1);
+                auto iter = mLiveKeys.begin();
                 std::advance(iter, dist(gRandomEngine));
 
                 mSelected = std::make_shared<LedgerEntry>(
                     ltx.loadWithoutRecord(*iter).current());
             }
         }
-        return BucketListGenerator::generateLiveEntries(ltx);
+
+        auto live = BucketListGenerator::generateLiveEntries(ltx);
+
+        // The selected entry must not be shadowed by a newer generated entry
+        if (mSelected)
+        {
+            auto key = LedgerEntryKey(*mSelected);
+            for (size_t i = 0; i < live.size(); ++i)
+            {
+                if (LedgerEntryKey(live.at(i)) == key)
+                {
+                    live.erase(live.begin() + i);
+                    break;
+                }
+            }
+        }
+
+        return live;
     }
 
     virtual std::vector<LedgerKey>
@@ -337,10 +310,10 @@ class ApplyBucketsWorkAddEntry : public ApplyBucketsWork
   public:
     ApplyBucketsWorkAddEntry(
         Application& app,
-        std::map<std::string, std::shared_ptr<Bucket>> const& buckets,
+        std::map<std::string, std::shared_ptr<LiveBucket>> const& buckets,
         HistoryArchiveState const& applyState, uint32_t maxProtocolVersion,
-        std::function<bool(LedgerEntryType)> filter, LedgerEntry const& entry)
-        : ApplyBucketsWork(app, buckets, applyState, maxProtocolVersion, filter)
+        LedgerEntry const& entry)
+        : ApplyBucketsWork(app, buckets, applyState, maxProtocolVersion)
         , mEntry(entry)
         , mAdded{false}
     {
@@ -356,13 +329,8 @@ class ApplyBucketsWorkAddEntry : public ApplyBucketsWork
             uint32_t maxLedger = std::numeric_limits<int32_t>::max() - 1;
             auto& ltxRoot = mApp.getLedgerTxnRoot();
 
-            size_t count = 0;
-            for (auto let : xdr::xdr_traits<LedgerEntryType>::enum_values())
-            {
-                count += ltxRoot.countObjects(
-                    static_cast<LedgerEntryType>(let),
-                    LedgerRange::inclusive(minLedger, maxLedger));
-            }
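+            // Offers are the only entries left in SQL, so a nonzero offer
+            // count indicates that entries have been written.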
+            auto count = ltxRoot.countOffers(
+                LedgerRange::inclusive(minLedger, maxLedger));
 
             if (count > 0)
             {
@@ -391,7 +359,7 @@ class ApplyBucketsWorkDeleteEntry : public ApplyBucketsWork
   public:
     ApplyBucketsWorkDeleteEntry(
         Application& app,
-        std::map<std::string, std::shared_ptr<Bucket>> const& buckets,
+        std::map<std::string, std::shared_ptr<LiveBucket>> const& buckets,
         HistoryArchiveState const& applyState, uint32_t maxProtocolVersion,
         LedgerEntry const& target)
         : ApplyBucketsWork(app, buckets, applyState, maxProtocolVersion)
@@ -431,26 +399,6 @@ class ApplyBucketsWorkModifyEntry : public ApplyBucketsWork
     LedgerEntry const mEntry;
     bool mModified;
 
-    void
-    modifyAccountEntry(LedgerEntry& entry)
-    {
-        AccountEntry const& account = mEntry.data.account();
-        entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq;
-        entry.data.account() = LedgerTestUtils::generateValidAccountEntry(5);
-        entry.data.account().accountID = account.accountID;
-    }
-
-    void
-    modifyTrustLineEntry(LedgerEntry& entry)
-    {
-        TrustLineEntry const& trustLine = mEntry.data.trustLine();
-        entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq;
-        entry.data.trustLine() =
-            LedgerTestUtils::generateValidTrustLineEntry(5);
-        entry.data.trustLine().accountID = trustLine.accountID;
-        entry.data.trustLine().asset = trustLine.asset;
-    }
-
     void
     modifyOfferEntry(LedgerEntry& entry)
     {
@@ -461,94 +409,10 @@ class ApplyBucketsWorkModifyEntry : public ApplyBucketsWork
         entry.data.offer().offerID = offer.offerID;
     }
 
-    void
-    modifyDataEntry(LedgerEntry& entry)
-    {
-        DataEntry const& data = mEntry.data.data();
-        entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq;
-        do
-        {
-            entry.data.data() = LedgerTestUtils::generateValidDataEntry(5);
-        } while (entry.data.data().dataValue == data.dataValue);
-        entry.data.data().accountID = data.accountID;
-        entry.data.data().dataName = data.dataName;
-    }
-
-    void
-    modifyClaimableBalanceEntry(LedgerEntry& entry)
-    {
-        ClaimableBalanceEntry const& cb = mEntry.data.claimableBalance();
-        entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq;
-        entry.data.claimableBalance() =
-            LedgerTestUtils::generateValidClaimableBalanceEntry(5);
-
-        entry.data.claimableBalance().balanceID = cb.balanceID;
-    }
-
-    void
-    modifyLiquidityPoolEntry(LedgerEntry& entry)
-    {
-        LiquidityPoolEntry const& lp = mEntry.data.liquidityPool();
-        entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq;
-        entry.data.liquidityPool() =
-            LedgerTestUtils::generateValidLiquidityPoolEntry(5);
-
-        entry.data.liquidityPool().liquidityPoolID = lp.liquidityPoolID;
-    }
-
-    void
-    modifyConfigSettingEntry(LedgerEntry& entry)
-    {
-        ConfigSettingEntry const& cfg = mEntry.data.configSetting();
-        entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq;
-        entry.data.configSetting() =
-            LedgerTestUtils::generateValidConfigSettingEntry(5);
-
-        entry.data.configSetting().configSettingID(cfg.configSettingID());
-    }
-
-    void
-    modifyContractDataEntry(LedgerEntry& entry)
-    {
-        ContractDataEntry const& cd = mEntry.data.contractData();
-        entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq;
-        entry.data.contractData() =
-            LedgerTestUtils::generateValidContractDataEntry(5);
-
-        entry.data.contractData().contract = cd.contract;
-        entry.data.contractData().key = cd.key;
-    }
-
-    void
-    modifyContractCodeEntry(LedgerEntry& entry)
-    {
-        ContractCodeEntry const& cc = mEntry.data.contractCode();
-        entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq;
-
-        while (entry.data.contractCode().code ==
-               mEntry.data.contractCode().code)
-        {
-            entry.data.contractCode() =
-                LedgerTestUtils::generateValidContractCodeEntry(5);
-        }
-
-        entry.data.contractCode().hash = cc.hash;
-    }
-
-    void
-    modifyTTLEntry(LedgerEntry& entry)
-    {
-        TTLEntry const& ee = mEntry.data.ttl();
-        entry.lastModifiedLedgerSeq = mEntry.lastModifiedLedgerSeq;
-        entry.data.ttl() = LedgerTestUtils::generateValidTTLEntry(5);
-
-        entry.data.ttl().keyHash = ee.keyHash;
-    }
-
   public:
     ApplyBucketsWorkModifyEntry(
         Application& app,
-        std::map<std::string, std::shared_ptr<Bucket>> const& buckets,
+        std::map<std::string, std::shared_ptr<LiveBucket>> const& buckets,
         HistoryArchiveState const& applyState, uint32_t maxProtocolVersion,
         LedgerEntry const& target)
         : ApplyBucketsWork(app, buckets, applyState, maxProtocolVersion)
@@ -567,41 +431,10 @@ class ApplyBucketsWorkModifyEntry : public ApplyBucketsWork
             auto entry = ltx.load(mKey);
             while (entry && entry.current() == mEntry)
             {
-                switch (mEntry.data.type())
-                {
-                case ACCOUNT:
-                    modifyAccountEntry(entry.current());
-                    break;
-                case TRUSTLINE:
-                    modifyTrustLineEntry(entry.current());
-                    break;
-                case OFFER:
-                    modifyOfferEntry(entry.current());
-                    break;
-                case DATA:
-                    modifyDataEntry(entry.current());
-                    break;
-                case CLAIMABLE_BALANCE:
-                    modifyClaimableBalanceEntry(entry.current());
-                    break;
-                case LIQUIDITY_POOL:
-                    modifyLiquidityPoolEntry(entry.current());
-                    break;
-                case CONFIG_SETTING:
-                    modifyConfigSettingEntry(entry.current());
-                    break;
-                case CONTRACT_DATA:
-                    modifyContractDataEntry(entry.current());
-                    break;
-                case CONTRACT_CODE:
-                    modifyContractCodeEntry(entry.current());
-                    break;
-                case TTL:
-                    modifyTTLEntry(entry.current());
-                    break;
-                default:
-                    REQUIRE(false);
-                }
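+                // Only offers remain in SQL, so the target entry must be an
+                // offer.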
+                releaseAssert(
+                    BucketIndex::typeNotSupported(mEntry.data.type()));
+
+                modifyOfferEntry(entry.current());
                 mModified = true;
             }
 
@@ -653,168 +486,61 @@ TEST_CASE("BucketListIsConsistentWithDatabase empty ledgers",
     REQUIRE_NOTHROW(blg.applyBuckets());
 }
 
-TEST_CASE("BucketListIsConsistentWithDatabase test root account",
-          "[invariant][bucketlistconsistent]")
-{
-    struct TestRootBucketListGenerator : public BucketListGenerator
-    {
-        uint32_t const mTargetLedger;
-        bool mModifiedRoot;
-
-        TestRootBucketListGenerator()
-            : mTargetLedger(stellar::uniform_int_distribution<uint32_t>(2, 100)(
-                  gRandomEngine))
-            , mModifiedRoot(false)
-        {
-        }
-
-        virtual std::vector<LedgerEntry>
-        generateLiveEntries(AbstractLedgerTxn& ltx)
-        {
-            if (mLedgerSeq == mTargetLedger)
-            {
-                mModifiedRoot = true;
-                auto& app = mAppGenerate;
-                auto skey = SecretKey::fromSeed(app->getNetworkID());
-                auto root = skey.getPublicKey();
-                auto le =
-                    stellar::loadAccountWithoutRecord(ltx, root).current();
-                le.lastModifiedLedgerSeq = mLedgerSeq;
-                return {le};
-            }
-            else
-            {
-                return BucketListGenerator::generateLiveEntries(ltx);
-            }
-        }
-
-        virtual std::vector<LedgerKey>
-        generateDeadEntries(AbstractLedgerTxn& ltx)
-        {
-            return {};
-        }
-    };
-
-    for (size_t j = 0; j < 5; ++j)
-    {
-        TestRootBucketListGenerator blg;
-        blg.generateLedgers(100);
-        REQUIRE(blg.mModifiedRoot);
-        REQUIRE_NOTHROW(blg.applyBuckets());
-    }
-}
-
 TEST_CASE("BucketListIsConsistentWithDatabase added entries",
           "[invariant][bucketlistconsistent][acceptance]")
 {
-    auto runTest = [](bool withFilter) {
-        for (size_t nTests = 0; nTests < 40; ++nTests)
-        {
-            BucketListGenerator blg;
-            blg.generateLedgers(100);
-
-            stellar::uniform_int_distribution<uint32_t> addAtLedgerDist(
-                2, blg.mLedgerSeq);
-            auto le = LedgerTestUtils::generateValidLedgerEntryWithExclusions(
-                {CONFIG_SETTING}, 5);
-            le.lastModifiedLedgerSeq = addAtLedgerDist(gRandomEngine);
-
-            if (!withFilter)
-            {
-                auto filter = [](auto) { return true; };
-                if (le.data.type() == CONFIG_SETTING)
-                {
-                    // Config settings would have a duplicate key due to low key
-                    // space.
-                    REQUIRE_THROWS_AS(
-                        blg.applyBuckets<ApplyBucketsWorkAddEntry>(filter, le),
-                        std::runtime_error);
-                }
-                else
-                {
-                    REQUIRE_THROWS_AS(
-                        blg.applyBuckets<ApplyBucketsWorkAddEntry>(filter, le),
-                        InvariantDoesNotHold);
-                }
-            }
-            else
-            {
-                auto filter = [&](auto let) { return let != le.data.type(); };
-                REQUIRE_NOTHROW(
-                    blg.applyBuckets<ApplyBucketsWorkAddEntry>(filter, le));
-            }
-        }
-    };
-
-    runTest(true);
+    for (size_t nTests = 0; nTests < 40; ++nTests)
+    {
+        BucketListGenerator blg;
+        blg.generateLedgers(100);
 
-    // This tests the filtering behavior of BucketListIsConsistentWithDatabase
-    // because the bucket apply will not add anything of the specified
-    // LedgerEntryType, but we will inject an additional LedgerEntry of that
-    // type anyway. But it shouldn't throw because the invariant isn't looking
-    // for those changes.
-    runTest(false);
+        stellar::uniform_int_distribution<uint32_t> addAtLedgerDist(
+            2, blg.mLedgerSeq);
+        auto le =
+            LedgerTestUtils::generateValidLedgerEntryWithTypes({OFFER}, 10);
+        le.lastModifiedLedgerSeq = addAtLedgerDist(gRandomEngine);
+        REQUIRE_THROWS_AS(blg.applyBuckets<ApplyBucketsWorkAddEntry>(le),
+                          InvariantDoesNotHold);
+    }
 }
 
 TEST_CASE("BucketListIsConsistentWithDatabase deleted entries",
           "[invariant][bucketlistconsistent][acceptance]")
 {
-    for (auto t : xdr::xdr_traits<LedgerEntryType>::enum_values())
+    size_t nTests = 0;
+    while (nTests < 10)
     {
-        size_t nTests = 0;
-        while (nTests < 10)
+        SelectBucketListGenerator blg(100);
+        blg.generateLedgers(100);
+        if (!blg.mSelected)
         {
-            SelectBucketListGenerator blg(100, static_cast<LedgerEntryType>(t));
-            blg.generateLedgers(100);
-            if (!blg.mSelected)
-            {
-                continue;
-            }
-            if (t == CONFIG_SETTING)
-            {
-                // Configuration can not be deleted.
-                REQUIRE_THROWS_AS(blg.applyBuckets<ApplyBucketsWorkDeleteEntry>(
-                                      *blg.mSelected),
-                                  std::runtime_error);
-            }
-            else
-            {
-                REQUIRE_THROWS_AS(blg.applyBuckets<ApplyBucketsWorkDeleteEntry>(
-                                      *blg.mSelected),
-                                  InvariantDoesNotHold);
-            }
-            ++nTests;
+            continue;
         }
+
+        REQUIRE_THROWS_AS(
+            blg.applyBuckets<ApplyBucketsWorkDeleteEntry>(*blg.mSelected),
+            InvariantDoesNotHold);
+        ++nTests;
     }
 }
 
 TEST_CASE("BucketListIsConsistentWithDatabase modified entries",
           "[invariant][bucketlistconsistent][acceptance]")
 {
-    for (auto t : xdr::xdr_traits<LedgerEntryType>::enum_values())
+    size_t nTests = 0;
+    while (nTests < 10)
     {
-        // Skip CONFIG_SETTING for now because the test modification test does
-        // not work unless blg itself updates the entry.
-        if (t == CONFIG_SETTING)
+        SelectBucketListGenerator blg(100);
+        blg.generateLedgers(100);
+        if (!blg.mSelected)
         {
             continue;
         }
 
-        size_t nTests = 0;
-        while (nTests < 10)
-        {
-            SelectBucketListGenerator blg(100, static_cast<LedgerEntryType>(t));
-            blg.generateLedgers(100);
-            if (!blg.mSelected)
-            {
-                continue;
-            }
-
-            REQUIRE_THROWS_AS(
-                blg.applyBuckets<ApplyBucketsWorkModifyEntry>(*blg.mSelected),
-                InvariantDoesNotHold);
-            ++nTests;
-        }
+        REQUIRE_THROWS_AS(
+            blg.applyBuckets<ApplyBucketsWorkModifyEntry>(*blg.mSelected),
+            InvariantDoesNotHold);
+        ++nTests;
     }
 }
 
@@ -857,15 +583,15 @@ TEST_CASE("BucketListIsConsistentWithDatabase bucket bounds",
         }
     };
 
-    for (uint32_t level = 0; level < BucketList::kNumLevels; ++level)
+    for (uint32_t level = 0; level < LiveBucketList::kNumLevels; ++level)
     {
-        uint32_t oldestLedger = BucketList::oldestLedgerInSnap(101, level);
+        uint32_t oldestLedger = LiveBucketList::oldestLedgerInSnap(101, level);
         if (oldestLedger == std::numeric_limits<uint32_t>::max())
         {
             break;
         }
-        uint32_t newestLedger = BucketList::oldestLedgerInCurr(101, level) +
-                                BucketList::sizeOfCurr(101, level) - 1;
+        uint32_t newestLedger = LiveBucketList::oldestLedgerInCurr(101, level) +
+                                LiveBucketList::sizeOfCurr(101, level) - 1;
         stellar::uniform_int_distribution<uint32_t> ledgerToModifyDist(
             std::max(2u, oldestLedger), newestLedger);
 
@@ -874,20 +600,21 @@ TEST_CASE("BucketListIsConsistentWithDatabase bucket bounds",
             uint32_t ledgerToModify = ledgerToModifyDist(gRandomEngine);
             uint32_t maxLowTargetLedger = 0;
             uint32_t minHighTargetLedger = 0;
-            if (ledgerToModify >= BucketList::oldestLedgerInCurr(101, level))
+            if (ledgerToModify >=
+                LiveBucketList::oldestLedgerInCurr(101, level))
             {
                 maxLowTargetLedger =
-                    BucketList::oldestLedgerInCurr(101, level) - 1;
+                    LiveBucketList::oldestLedgerInCurr(101, level) - 1;
                 minHighTargetLedger =
-                    BucketList::oldestLedgerInCurr(101, level) +
-                    BucketList::sizeOfCurr(101, level);
+                    LiveBucketList::oldestLedgerInCurr(101, level) +
+                    LiveBucketList::sizeOfCurr(101, level);
             }
             else
             {
                 maxLowTargetLedger =
-                    BucketList::oldestLedgerInSnap(101, level) - 1;
+                    LiveBucketList::oldestLedgerInSnap(101, level) - 1;
                 minHighTargetLedger =
-                    BucketList::oldestLedgerInCurr(101, level);
+                    LiveBucketList::oldestLedgerInCurr(101, level);
             }
             stellar::uniform_int_distribution<uint32_t> lowTargetLedgerDist(
                 1, maxLowTargetLedger);
@@ -913,8 +640,8 @@ TEST_CASE("BucketListIsConsistentWithDatabase merged LIVEENTRY and DEADENTRY",
     {
         uint32_t const mTargetLedger;
 
-        MergeBucketListGenerator(LedgerEntryType let)
-            : SelectBucketListGenerator(25, let), mTargetLedger(110)
+        MergeBucketListGenerator()
+            : SelectBucketListGenerator(25), mTargetLedger(110)
         {
         }
 
@@ -937,72 +664,61 @@ TEST_CASE("BucketListIsConsistentWithDatabase merged LIVEENTRY and DEADENTRY",
         return (bool)ltx.load(LedgerEntryKey(le));
     };
 
-    auto cfg = getTestConfig(1, Config::TESTDB_IN_MEMORY_OFFERS);
+    auto cfg = getTestConfig(1);
     cfg.OVERRIDE_EVICTION_PARAMS_FOR_TESTING = true;
     cfg.TESTING_STARTING_EVICTION_SCAN_LEVEL = 1;
 
-    testutil::BucketListDepthModifier bldm(3);
-    for (auto t : xdr::xdr_traits<LedgerEntryType>::enum_values())
+    testutil::BucketListDepthModifier<LiveBucket> bldm(3);
+    uint32_t nTests = 0;
+    while (nTests < 5)
     {
-        if (t == CONFIG_SETTING)
+        MergeBucketListGenerator blg;
+        auto& blGenerate =
+            blg.mAppGenerate->getBucketManager().getLiveBucketList();
+
+        blg.generateLedgers(100);
+        if (!blg.mSelected)
         {
-            // Merge logic is not applicable to configuration.
             continue;
         }
 
-        uint32_t nTests = 0;
-        while (nTests < 5)
-        {
-            MergeBucketListGenerator blg(static_cast<LedgerEntryType>(t));
-            auto& blGenerate =
-                blg.mAppGenerate->getBucketManager().getBucketList();
-
-            blg.generateLedgers(100);
-            if (!blg.mSelected)
-            {
-                continue;
-            }
-
-            BucketEntry dead(DEADENTRY);
-            dead.deadEntry() = LedgerEntryKey(*blg.mSelected);
-            BucketEntry live(LIVEENTRY);
-            live.liveEntry() = *blg.mSelected;
-            BucketEntry init(INITENTRY);
-            init.liveEntry() = *blg.mSelected;
-
-            {
-                VirtualClock clock;
-                Application::pointer appApply =
-                    createTestApplication(clock, cfg);
-                REQUIRE_NOTHROW(blg.applyBuckets(appApply));
-                REQUIRE(exists(*blg.mAppGenerate, *blg.mSelected));
-                REQUIRE(exists(*appApply, *blg.mSelected));
-            }
+        BucketEntry dead(DEADENTRY);
+        dead.deadEntry() = LedgerEntryKey(*blg.mSelected);
+        BucketEntry live(LIVEENTRY);
+        live.liveEntry() = *blg.mSelected;
+        BucketEntry init(INITENTRY);
+        init.liveEntry() = *blg.mSelected;
 
-            blg.generateLedgers(10);
-            REQUIRE(doesBucketListContain(blGenerate, dead));
-            REQUIRE((doesBucketListContain(blGenerate, live) ||
-                     doesBucketListContain(blGenerate, init)));
+        {
+            VirtualClock clock;
+            Application::pointer appApply = createTestApplication(clock, cfg);
+            REQUIRE_NOTHROW(blg.applyBuckets(appApply));
+            REQUIRE(exists(*blg.mAppGenerate, *blg.mSelected));
+            REQUIRE(exists(*appApply, *blg.mSelected));
+        }
 
-            blg.generateLedgers(100);
-            REQUIRE(!doesBucketListContain(blGenerate, dead));
-            REQUIRE(!(doesBucketListContain(blGenerate, live) ||
-                      doesBucketListContain(blGenerate, init)));
-            REQUIRE(!exists(*blg.mAppGenerate, *blg.mSelected));
+        blg.generateLedgers(10);
+        REQUIRE(doesBucketListContain(blGenerate, dead));
+        REQUIRE((doesBucketListContain(blGenerate, live) ||
+                 doesBucketListContain(blGenerate, init)));
 
-            {
-                VirtualClock clock;
-                Application::pointer appApply =
-                    createTestApplication(clock, cfg);
-                REQUIRE_NOTHROW(blg.applyBuckets(appApply));
-                auto& blApply = appApply->getBucketManager().getBucketList();
-                REQUIRE(!doesBucketListContain(blApply, dead));
-                REQUIRE(!(doesBucketListContain(blApply, live) ||
-                          doesBucketListContain(blApply, init)));
-                REQUIRE(!exists(*appApply, *blg.mSelected));
-            }
+        blg.generateLedgers(100);
+        REQUIRE(!doesBucketListContain(blGenerate, dead));
+        REQUIRE(!(doesBucketListContain(blGenerate, live) ||
+                  doesBucketListContain(blGenerate, init)));
+        REQUIRE(!exists(*blg.mAppGenerate, *blg.mSelected));
 
-            ++nTests;
+        {
+            VirtualClock clock;
+            Application::pointer appApply = createTestApplication(clock, cfg);
+            REQUIRE_NOTHROW(blg.applyBuckets(appApply));
+            auto& blApply = appApply->getBucketManager().getLiveBucketList();
+            REQUIRE(!doesBucketListContain(blApply, dead));
+            REQUIRE(!(doesBucketListContain(blApply, live) ||
+                      doesBucketListContain(blApply, init)));
+            REQUIRE(!exists(*appApply, *blg.mSelected));
         }
+
+        ++nTests;
     }
 }
diff --git a/src/invariant/test/ConservationOfLumensTests.cpp b/src/invariant/test/ConservationOfLumensTests.cpp
index 6b91b127b0..e5686c70ef 100644
--- a/src/invariant/test/ConservationOfLumensTests.cpp
+++ b/src/invariant/test/ConservationOfLumensTests.cpp
@@ -153,7 +153,7 @@ TEST_CASE("Fee pool change without inflation",
 TEST_CASE("Account balances changed without inflation",
           "[invariant][conservationoflumens]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"ConservationOfLumens"};
 
     uint32_t const N = 10;
@@ -187,7 +187,7 @@ TEST_CASE("Account balances changed without inflation",
 TEST_CASE("Account balances unchanged without inflation",
           "[invariant][conservationoflumens]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"ConservationOfLumens"};
 
     uint32_t const N = 10;
@@ -228,7 +228,7 @@ TEST_CASE("Account balances unchanged without inflation",
 TEST_CASE("Inflation changes are consistent",
           "[invariant][conservationoflumens]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"ConservationOfLumens"};
     stellar::uniform_int_distribution<uint32_t> payoutsDist(1, 100);
     stellar::uniform_int_distribution<int64_t> amountDist(1, 100000);
diff --git a/src/invariant/test/InvariantTests.cpp b/src/invariant/test/InvariantTests.cpp
index 020e94037d..857056e51f 100644
--- a/src/invariant/test/InvariantTests.cpp
+++ b/src/invariant/test/InvariantTests.cpp
@@ -54,9 +54,10 @@ class TestInvariant : public Invariant
     }
 
     virtual std::string
-    checkOnBucketApply(std::shared_ptr<Bucket const> bucket,
-                       uint32_t oldestLedger, uint32_t newestLedger,
-                       std::function<bool(LedgerEntryType)> filter) override
+    checkOnBucketApply(
+        std::shared_ptr<LiveBucket const> bucket, uint32_t oldestLedger,
+        uint32_t newestLedger,
+        std::unordered_set<LedgerKey> const& shadowedKeys) override
     {
         return mShouldFail ? "fail" : "";
     }
@@ -164,14 +165,13 @@ TEST_CASE("onBucketApply fail succeed", "[invariant]")
         app->getInvariantManager().enableInvariant(
             TestInvariant::toString(0, true));
 
-        auto bucket = std::make_shared<Bucket>();
+        auto bucket = std::make_shared<LiveBucket>();
         uint32_t ledger = 1;
         uint32_t level = 0;
         bool isCurr = true;
-        REQUIRE_THROWS_AS(
-            app->getInvariantManager().checkOnBucketApply(
-                bucket, ledger, level, isCurr, [](auto) { return true; }),
-            InvariantDoesNotHold);
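+        // An empty shadowed-key set is sufficient here; this test only checks
+        // that the invariant failure propagates.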
+        REQUIRE_THROWS_AS(app->getInvariantManager().checkOnBucketApply(
+                              bucket, ledger, level, isCurr, {}),
+                          InvariantDoesNotHold);
     }
 
     {
@@ -184,12 +184,12 @@ TEST_CASE("onBucketApply fail succeed", "[invariant]")
         app->getInvariantManager().enableInvariant(
             TestInvariant::toString(0, false));
 
-        auto bucket = std::make_shared<Bucket>();
+        auto bucket = std::make_shared<LiveBucket>();
         uint32_t ledger = 1;
         uint32_t level = 0;
         bool isCurr = true;
         REQUIRE_NOTHROW(app->getInvariantManager().checkOnBucketApply(
-            bucket, ledger, level, isCurr, [](auto) { return true; }));
+            bucket, ledger, level, isCurr, {}));
     }
 }
 
diff --git a/src/invariant/test/LedgerEntryIsValidTests.cpp b/src/invariant/test/LedgerEntryIsValidTests.cpp
index 4d946183ee..082066e6e7 100644
--- a/src/invariant/test/LedgerEntryIsValidTests.cpp
+++ b/src/invariant/test/LedgerEntryIsValidTests.cpp
@@ -19,7 +19,7 @@ using namespace stellar::InvariantTestUtils;
 TEST_CASE("Trigger validity check for each entry type",
           "[invariant][ledgerentryisvalid]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"LedgerEntryIsValid"};
 
     VirtualClock clock;
@@ -67,7 +67,7 @@ TEST_CASE("Trigger validity check for each entry type",
 TEST_CASE("Modify ClaimableBalanceEntry",
           "[invariant][ledgerentryisvalid][claimablebalance]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"LedgerEntryIsValid"};
 
     VirtualClock clock;
diff --git a/src/invariant/test/LiabilitiesMatchOffersTests.cpp b/src/invariant/test/LiabilitiesMatchOffersTests.cpp
index 1c95224341..c4de34c9c0 100644
--- a/src/invariant/test/LiabilitiesMatchOffersTests.cpp
+++ b/src/invariant/test/LiabilitiesMatchOffersTests.cpp
@@ -58,7 +58,7 @@ updateAccountWithRandomBalance(LedgerEntry le, Application& app,
 TEST_CASE("Create account above minimum balance",
           "[invariant][liabilitiesmatchoffers]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"};
 
     for (uint32_t i = 0; i < 10; ++i)
@@ -75,7 +75,7 @@ TEST_CASE("Create account above minimum balance",
 TEST_CASE("Create account below minimum balance",
           "[invariant][liabilitiesmatchoffers]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"};
 
     for (uint32_t i = 0; i < 10; ++i)
@@ -92,7 +92,7 @@ TEST_CASE("Create account below minimum balance",
 TEST_CASE("Create account then decrease balance below minimum",
           "[invariant][liabilitiesmatchoffers]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"};
 
     for (uint32_t i = 0; i < 10; ++i)
@@ -111,7 +111,7 @@ TEST_CASE("Create account then decrease balance below minimum",
 TEST_CASE("Account below minimum balance increases but stays below minimum",
           "[invariant][liabilitiesmatchoffers]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"};
 
     for (uint32_t i = 0; i < 10; ++i)
@@ -130,7 +130,7 @@ TEST_CASE("Account below minimum balance increases but stays below minimum",
 TEST_CASE("Account below minimum balance decreases",
           "[invariant][liabilitiesmatchoffers]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"};
 
     for (uint32_t i = 0; i < 10; ++i)
@@ -250,7 +250,7 @@ generateBuyingLiabilities(Application& app, LedgerEntry offer, bool excess,
 TEST_CASE("Create account then increase liabilities without changing balance",
           "[invariant][liabilitiesmatchoffers]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"};
 
     VirtualClock clock;
@@ -289,7 +289,7 @@ TEST_CASE("Create account then increase liabilities without changing balance",
 
 TEST_CASE("Invariant for liabilities", "[invariant][liabilitiesmatchoffers]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"LiabilitiesMatchOffers"};
 
     VirtualClock clock;
diff --git a/src/invariant/test/OrderBookIsNotCrossedTests.cpp b/src/invariant/test/OrderBookIsNotCrossedTests.cpp
index c10a6a5daf..7e3b1ab2c4 100644
--- a/src/invariant/test/OrderBookIsNotCrossedTests.cpp
+++ b/src/invariant/test/OrderBookIsNotCrossedTests.cpp
@@ -109,7 +109,7 @@ TEST_CASE("OrderBookIsNotCrossed in-memory order book is consistent with "
           "[invariant][OrderBookIsNotCrossed]")
 {
     VirtualClock clock;
-    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     // When testing the order book not crossed invariant, enable it and no other
     // invariants (these tests do things which violate other invariants).
     cfg.INVARIANT_CHECKS = {};
@@ -185,7 +185,7 @@ TEST_CASE("OrderBookIsNotCrossed properly throws if order book is crossed",
 {
 
     VirtualClock clock;
-    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     // When testing the order book not crossed invariant, enable it and no other
     // invariants (these tests do things which violate other invariants).
     cfg.INVARIANT_CHECKS = {};
diff --git a/src/invariant/test/SponsorshipCountIsValidTests.cpp b/src/invariant/test/SponsorshipCountIsValidTests.cpp
index 9f35cd5292..91d75c805b 100644
--- a/src/invariant/test/SponsorshipCountIsValidTests.cpp
+++ b/src/invariant/test/SponsorshipCountIsValidTests.cpp
@@ -18,7 +18,7 @@ using namespace stellar::InvariantTestUtils;
 TEST_CASE("sponsorship invariant", "[invariant][sponsorshipcountisvalid]")
 {
     VirtualClock clock;
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.INVARIANT_CHECKS = {"SponsorshipCountIsValid"};
     auto app = createTestApplication(clock, cfg);
 
diff --git a/src/ledger/InMemoryLedgerTxn.cpp b/src/ledger/InMemoryLedgerTxn.cpp
index bcdaca07a2..1ba5e5e7af 100644
--- a/src/ledger/InMemoryLedgerTxn.cpp
+++ b/src/ledger/InMemoryLedgerTxn.cpp
@@ -4,9 +4,12 @@
 
 #include "ledger/InMemoryLedgerTxn.h"
 #include "crypto/SecretKey.h"
+#include "ledger/InMemoryLedgerTxnRoot.h"
+#include "ledger/LedgerTxn.h"
 #include "ledger/LedgerTxnImpl.h"
 #include "transactions/TransactionUtils.h"
 #include "util/GlobalChecks.h"
+#include "util/UnorderedMap.h"
 #include "util/XDROperators.h"
 
 namespace stellar
@@ -73,8 +76,9 @@ InMemoryLedgerTxn::FilteredEntryIteratorImpl::clone() const
 }
 
 InMemoryLedgerTxn::InMemoryLedgerTxn(InMemoryLedgerTxnRoot& parent,
-                                     Database& db)
-    : LedgerTxn(parent), mDb(db)
+                                     Database& db,
+                                     AbstractLedgerTxnParent* realRoot)
+    : LedgerTxn(parent), mDb(db), mRealRootForOffers(realRoot)
 {
 }
 
@@ -141,6 +145,36 @@ InMemoryLedgerTxn::updateLedgerKeyMap(EntryIterator iter)
     {
         auto const& genKey = iter.key();
         updateLedgerKeyMap(genKey, iter.entryExists());
+
+        // In addition to maintaining the in-memory map, commit offers to the
+        // "real" ltx root so that tests exercise SQL-backed offers.
+        if (mRealRootForOffers &&
+            genKey.type() == InternalLedgerEntryType::LEDGER_ENTRY)
+        {
+            auto const& ledgerKey = genKey.ledgerKey();
+            if (ledgerKey.type() == OFFER)
+            {
+                LedgerTxn ltx(*mRealRootForOffers);
+                if (!iter.entryExists())
+                {
+                    ltx.erase(ledgerKey);
+                }
+                else
+                {
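+                    // Upsert: create the offer if it does not exist yet,
+                    // otherwise overwrite the existing entry in place.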
+                    auto ltxe = ltx.load(genKey);
+                    if (!ltxe)
+                    {
+                        ltx.create(iter.entry());
+                    }
+                    else
+                    {
+                        ltxe.current() = iter.entry().ledgerEntry();
+                    }
+                }
+
+                ltx.commit();
+            }
+        }
     }
 }
 
@@ -332,4 +366,74 @@ InMemoryLedgerTxn::getPoolShareTrustLinesByAccountAndAsset(
     return res;
 }
 
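+// When mRealRootForOffers is set, the overrides below forward offer-related
+// calls to the real SQL-backed root; otherwise they fall back to the base
+// LedgerTxn behavior.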
+void
+InMemoryLedgerTxn::dropOffers(bool rebuild)
+{
+    if (mRealRootForOffers)
+    {
+        mRealRootForOffers->dropOffers(rebuild);
+    }
+    else
+    {
+        LedgerTxn::dropOffers(rebuild);
+    }
+}
+
+uint64_t
+InMemoryLedgerTxn::countOffers(LedgerRange const& ledgers) const
+{
+    if (mRealRootForOffers)
+    {
+        return mRealRootForOffers->countOffers(ledgers);
+    }
+
+    return LedgerTxn::countOffers(ledgers);
+}
+
+void
+InMemoryLedgerTxn::deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const
+{
+    if (mRealRootForOffers)
+    {
+        mRealRootForOffers->deleteOffersModifiedOnOrAfterLedger(ledger);
+    }
+    else
+    {
+        LedgerTxn::deleteOffersModifiedOnOrAfterLedger(ledger);
+    }
+}
+
+UnorderedMap<LedgerKey, LedgerEntry>
+InMemoryLedgerTxn::getAllOffers()
+{
+    if (mRealRootForOffers)
+    {
+        return mRealRootForOffers->getAllOffers();
+    }
+
+    return LedgerTxn::getAllOffers();
+}
+
+std::shared_ptr<LedgerEntry const>
+InMemoryLedgerTxn::getBestOffer(Asset const& buying, Asset const& selling)
+{
+    if (mRealRootForOffers)
+    {
+        return mRealRootForOffers->getBestOffer(buying, selling);
+    }
+
+    return LedgerTxn::getBestOffer(buying, selling);
+}
+
+std::shared_ptr<LedgerEntry const>
+InMemoryLedgerTxn::getBestOffer(Asset const& buying, Asset const& selling,
+                                OfferDescriptor const& worseThan)
+{
+    if (mRealRootForOffers)
+    {
+        return mRealRootForOffers->getBestOffer(buying, selling, worseThan);
+    }
+
+    return LedgerTxn::getBestOffer(buying, selling, worseThan);
+}
 }
diff --git a/src/ledger/InMemoryLedgerTxn.h b/src/ledger/InMemoryLedgerTxn.h
index 76cf56fcae..2f8d03d3ed 100644
--- a/src/ledger/InMemoryLedgerTxn.h
+++ b/src/ledger/InMemoryLedgerTxn.h
@@ -44,6 +44,12 @@ class InMemoryLedgerTxn : public LedgerTxn
     Database& mDb;
     std::unique_ptr<soci::transaction> mTransaction;
 
+    // For some tests, we need to bypass ledger close and commit directly to the
+    // in-memory ltx. However, we still want to test SQL-backed offers. The
+    // never-committing root sets this pointer so that offer-related calls are
+    // forwarded to the real SQL-backed root.
+    AbstractLedgerTxnParent* const mRealRootForOffers;
+
     UnorderedMap<AccountID, UnorderedSet<InternalLedgerKey>>
         mOffersAndPoolShareTrustlineKeys;
 
@@ -75,7 +81,8 @@ class InMemoryLedgerTxn : public LedgerTxn
     EntryIterator getFilteredEntryIterator(EntryIterator const& iter);
 
   public:
-    InMemoryLedgerTxn(InMemoryLedgerTxnRoot& parent, Database& db);
+    InMemoryLedgerTxn(InMemoryLedgerTxnRoot& parent, Database& db,
+                      AbstractLedgerTxnParent* realRoot = nullptr);
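+    // Hypothetical usage sketch: a test that wants SQL-backed offers might
+    // pass the application's real root as the third argument, e.g.
+    //   InMemoryLedgerTxn ltx(inMemoryRoot, app.getDatabase(), &realRoot);
+    // where `realRoot` is the SQL-backed LedgerTxnRoot.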
     virtual ~InMemoryLedgerTxn();
 
     void addChild(AbstractLedgerTxn& child, TransactionMode mode) override;
@@ -100,6 +107,19 @@ class InMemoryLedgerTxn : public LedgerTxn
     UnorderedMap<LedgerKey, LedgerEntry>
     getPoolShareTrustLinesByAccountAndAsset(AccountID const& account,
                                             Asset const& asset) override;
+
+    // These functions call into the real LedgerTxn root to test offer SQL
+    // related functionality
+    UnorderedMap<LedgerKey, LedgerEntry> getAllOffers() override;
+    std::shared_ptr<LedgerEntry const>
+    getBestOffer(Asset const& buying, Asset const& selling) override;
+    std::shared_ptr<LedgerEntry const>
+    getBestOffer(Asset const& buying, Asset const& selling,
+                 OfferDescriptor const& worseThan) override;
+
+    void dropOffers(bool rebuild) override;
+    uint64_t countOffers(LedgerRange const& ledgers) const override;
+    void deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const override;
 };
 
 }
diff --git a/src/ledger/InMemoryLedgerTxnRoot.cpp b/src/ledger/InMemoryLedgerTxnRoot.cpp
index 386ceb2e93..891a493ea8 100644
--- a/src/ledger/InMemoryLedgerTxnRoot.cpp
+++ b/src/ledger/InMemoryLedgerTxnRoot.cpp
@@ -97,74 +97,22 @@ InMemoryLedgerTxnRoot::getNewestVersion(InternalLedgerKey const& key) const
 }
 
 uint64_t
-InMemoryLedgerTxnRoot::countObjects(LedgerEntryType let) const
-{
-    return 0;
-}
-
-uint64_t
-InMemoryLedgerTxnRoot::countObjects(LedgerEntryType let,
-                                    LedgerRange const& ledgers) const
+InMemoryLedgerTxnRoot::countOffers(LedgerRange const& ledgers) const
 {
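+    // The pure in-memory root has no SQL offers table, so there is nothing to
+    // count.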
     return 0;
 }
 
 void
-InMemoryLedgerTxnRoot::deleteObjectsModifiedOnOrAfterLedger(
+InMemoryLedgerTxnRoot::deleteOffersModifiedOnOrAfterLedger(
     uint32_t ledger) const
 {
 }
 
-void
-InMemoryLedgerTxnRoot::dropAccounts(bool)
-{
-}
-
-void
-InMemoryLedgerTxnRoot::dropData(bool)
-{
-}
-
 void
 InMemoryLedgerTxnRoot::dropOffers(bool)
 {
 }
 
-void
-InMemoryLedgerTxnRoot::dropTrustLines(bool)
-{
-}
-
-void
-InMemoryLedgerTxnRoot::dropClaimableBalances(bool)
-{
-}
-
-void
-InMemoryLedgerTxnRoot::dropLiquidityPools(bool)
-{
-}
-
-void
-InMemoryLedgerTxnRoot::dropContractData(bool)
-{
-}
-
-void
-InMemoryLedgerTxnRoot::dropContractCode(bool)
-{
-}
-
-void
-InMemoryLedgerTxnRoot::dropConfigSettings(bool)
-{
-}
-
-void
-InMemoryLedgerTxnRoot::dropTTL(bool)
-{
-}
-
 double
 InMemoryLedgerTxnRoot::getPrefetchHitRate() const
 {
diff --git a/src/ledger/InMemoryLedgerTxnRoot.h b/src/ledger/InMemoryLedgerTxnRoot.h
index 5d4bc3fe19..647bc20823 100644
--- a/src/ledger/InMemoryLedgerTxnRoot.h
+++ b/src/ledger/InMemoryLedgerTxnRoot.h
@@ -64,22 +64,11 @@ class InMemoryLedgerTxnRoot : public AbstractLedgerTxnParent
     std::shared_ptr<InternalLedgerEntry const>
     getNewestVersion(InternalLedgerKey const& key) const override;
 
-    uint64_t countObjects(LedgerEntryType let) const override;
-    uint64_t countObjects(LedgerEntryType let,
-                          LedgerRange const& ledgers) const override;
+    uint64_t countOffers(LedgerRange const& ledgers) const override;
 
-    void deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const override;
+    void deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const override;
 
-    void dropAccounts(bool rebuild) override;
-    void dropData(bool rebuild) override;
     void dropOffers(bool rebuild) override;
-    void dropTrustLines(bool rebuild) override;
-    void dropClaimableBalances(bool rebuild) override;
-    void dropLiquidityPools(bool rebuild) override;
-    void dropContractData(bool rebuild) override;
-    void dropContractCode(bool rebuild) override;
-    void dropConfigSettings(bool rebuild) override;
-    void dropTTL(bool rebuild) override;
     double getPrefetchHitRate() const override;
     uint32_t prefetchClassic(UnorderedSet<LedgerKey> const& keys) override;
     uint32_t prefetchSoroban(UnorderedSet<LedgerKey> const& keys,
diff --git a/src/ledger/LedgerManager.h b/src/ledger/LedgerManager.h
index 88d0ca8bb8..aacc186215 100644
--- a/src/ledger/LedgerManager.h
+++ b/src/ledger/LedgerManager.h
@@ -172,7 +172,7 @@ class LedgerManager
     virtual void
     startCatchup(CatchupConfiguration configuration,
                  std::shared_ptr<HistoryArchive> archive,
-                 std::set<std::shared_ptr<Bucket>> bucketsToRetain) = 0;
+                 std::set<std::shared_ptr<LiveBucket>> bucketsToRetain) = 0;
 
     // Forcibly close the current ledger, applying `ledgerData` as the consensus
     // changes.  This is normally done automatically as part of
diff --git a/src/ledger/LedgerManagerImpl.cpp b/src/ledger/LedgerManagerImpl.cpp
index 2b7f328671..5ab3350a7e 100644
--- a/src/ledger/LedgerManagerImpl.cpp
+++ b/src/ledger/LedgerManagerImpl.cpp
@@ -351,41 +351,35 @@ LedgerManagerImpl::loadLastKnownLedger(bool restoreBucketlist,
 
     releaseAssert(latestLedgerHeader.has_value());
 
-    // Step 3. Restore BucketList if we're doing a full core startup
-    // (startServices=true), OR when using BucketListDB
-    if (restoreBucketlist || mApp.getConfig().isUsingBucketListDB())
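+    // Verify the bucket directory is intact in every startup mode: any bucket
+    // referenced by the HAS or by the publish queue must be present on disk.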
+    HistoryArchiveState has = getLastClosedLedgerHAS();
+    auto missing = mApp.getBucketManager().checkForMissingBucketsFiles(has);
+    auto pubmissing =
+        mApp.getHistoryManager().getMissingBucketsReferencedByPublishQueue();
+    missing.insert(missing.end(), pubmissing.begin(), pubmissing.end());
+    if (!missing.empty())
     {
-        HistoryArchiveState has = getLastClosedLedgerHAS();
-        auto missing = mApp.getBucketManager().checkForMissingBucketsFiles(has);
-        auto pubmissing = mApp.getHistoryManager()
-                              .getMissingBucketsReferencedByPublishQueue();
-        missing.insert(missing.end(), pubmissing.begin(), pubmissing.end());
-        if (!missing.empty())
+        CLOG_ERROR(Ledger, "{} buckets are missing from bucket directory '{}'",
+                   missing.size(), mApp.getBucketManager().getBucketDir());
+        throw std::runtime_error("Bucket directory is corrupt");
+    }
+
+    if (mApp.getConfig().MODE_ENABLES_BUCKETLIST)
+    {
+        // Only restart merges in full startup mode. Many modes in core
+        // (standalone offline commands, in-memory setup) do not need to
+        // spin up expensive merge processes.
+        auto assumeStateWork =
+            mApp.getWorkScheduler().executeWork<AssumeStateWork>(
+                has, latestLedgerHeader->ledgerVersion, restoreBucketlist);
+        if (assumeStateWork->getState() == BasicWork::State::WORK_SUCCESS)
         {
-            CLOG_ERROR(Ledger,
-                       "{} buckets are missing from bucket directory '{}'",
-                       missing.size(), mApp.getBucketManager().getBucketDir());
-            throw std::runtime_error("Bucket directory is corrupt");
+            CLOG_INFO(Ledger, "Assumed bucket-state for LCL: {}",
+                      ledgerAbbrev(*latestLedgerHeader));
         }
-
-        if (mApp.getConfig().MODE_ENABLES_BUCKETLIST)
+        else
         {
-            // Only restart merges in full startup mode. Many modes in core
-            // (standalone offline commands, in-memory setup) do not need to
-            // spin up expensive merge processes.
-            auto assumeStateWork =
-                mApp.getWorkScheduler().executeWork<AssumeStateWork>(
-                    has, latestLedgerHeader->ledgerVersion, restoreBucketlist);
-            if (assumeStateWork->getState() == BasicWork::State::WORK_SUCCESS)
-            {
-                CLOG_INFO(Ledger, "Assumed bucket-state for LCL: {}",
-                          ledgerAbbrev(*latestLedgerHeader));
-            }
-            else
-            {
-                // Work should only fail during graceful shutdown
-                releaseAssertOrThrow(mApp.isStopping());
-            }
+            // Work should only fail during graceful shutdown
+            releaseAssertOrThrow(mApp.isStopping());
         }
     }
 
@@ -731,7 +725,7 @@ LedgerManagerImpl::closeLedgerIf(LedgerCloseData const& ledgerData)
 void
 LedgerManagerImpl::startCatchup(
     CatchupConfiguration configuration, std::shared_ptr<HistoryArchive> archive,
-    std::set<std::shared_ptr<Bucket>> bucketsToRetain)
+    std::set<std::shared_ptr<LiveBucket>> bucketsToRetain)
 {
     ZoneScoped;
     setState(LM_CATCHING_UP_STATE);
@@ -1047,9 +1041,7 @@ LedgerManagerImpl::closeLedger(LedgerCloseData const& ledgerData)
     ltx.commit();
 
     // step 3
-    if (protocolVersionStartsFrom(initialLedgerVers,
-                                  SOROBAN_PROTOCOL_VERSION) &&
-        mApp.getConfig().isUsingBackgroundEviction())
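+    // Eviction scans now always run in the background, so kick one off
+    // unconditionally on Soroban-enabled protocol versions.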
+    if (protocolVersionStartsFrom(initialLedgerVers, SOROBAN_PROTOCOL_VERSION))
     {
         mApp.getBucketManager().startBackgroundEvictionScan(ledgerSeq + 1);
     }
@@ -1307,13 +1299,16 @@ LedgerManagerImpl::advanceLedgerPointers(LedgerHeader const& header,
     mLastClosedLedger.hash = ledgerHash;
     mLastClosedLedger.header = header;
 
-    if (mApp.getConfig().isUsingBucketListDB() &&
-        header.ledgerSeq != prevLedgerSeq)
+    if (header.ledgerSeq != prevLedgerSeq)
     {
-        mApp.getBucketManager()
-            .getBucketSnapshotManager()
-            .updateCurrentSnapshot(std::make_unique<BucketListSnapshot>(
-                mApp.getBucketManager().getBucketList(), header));
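+        // Refresh both the live and hot-archive BucketList snapshots for the
+        // newly closed ledger.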
+        auto& bm = mApp.getBucketManager();
+        auto liveSnapshot = std::make_unique<BucketListSnapshot<LiveBucket>>(
+            bm.getLiveBucketList(), header);
+        auto hotArchiveSnapshot =
+            std::make_unique<BucketListSnapshot<HotArchiveBucket>>(
+                bm.getHotArchiveBucketList(), header);
+        bm.getBucketSnapshotManager().updateCurrentSnapshot(
+            std::move(liveSnapshot), std::move(hotArchiveSnapshot));
     }
 }
 
@@ -1485,10 +1480,7 @@ LedgerManagerImpl::prefetchTransactionData(
         {
             if (tx->isSoroban())
             {
-                if (mApp.getConfig().isUsingBucketListDB())
-                {
-                    tx->insertKeysForTxApply(sorobanKeys, lkMeter.get());
-                }
+                tx->insertKeysForTxApply(sorobanKeys, lkMeter.get());
             }
             else
             {
@@ -1497,14 +1489,11 @@ LedgerManagerImpl::prefetchTransactionData(
         }
         // Prefetch classic and soroban keys separately for greater visibility
         // into the performance of each mode.
-        if (mApp.getConfig().isUsingBucketListDB())
+        if (!sorobanKeys.empty())
         {
-            if (!sorobanKeys.empty())
-            {
-                mApp.getLedgerTxnRoot().prefetchSoroban(sorobanKeys,
-                                                        lkMeter.get());
-            }
+            mApp.getLedgerTxnRoot().prefetchSoroban(sorobanKeys, lkMeter.get());
         }
+
         mApp.getLedgerTxnRoot().prefetchClassic(classicKeys);
     }
 }
@@ -1656,10 +1645,10 @@ LedgerManagerImpl::storeCurrentLedger(LedgerHeader const& header,
     mApp.getPersistentState().setState(PersistentState::kLastClosedLedger,
                                        binToHex(hash));
 
-    BucketList bl;
+    LiveBucketList bl;
     if (mApp.getConfig().MODE_ENABLES_BUCKETLIST)
     {
-        bl = mApp.getBucketManager().getBucketList();
+        bl = mApp.getBucketManager().getLiveBucketList();
     }
     // Store the current HAS in the database; this is really just to checkpoint
     // the bucketlist so we can survive a restart and re-attach to the buckets.
@@ -1698,17 +1687,8 @@ LedgerManagerImpl::transferLedgerEntriesToBucketList(
         {
             auto keys = ltx.getAllTTLKeysWithoutSealing();
             LedgerTxn ltxEvictions(ltx);
-
-            if (mApp.getConfig().isUsingBackgroundEviction())
-            {
-                mApp.getBucketManager().resolveBackgroundEvictionScan(
-                    ltxEvictions, lh.ledgerSeq, keys);
-            }
-            else
-            {
-                mApp.getBucketManager().scanForEvictionLegacy(ltxEvictions,
-                                                              lh.ledgerSeq);
-            }
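+            // The legacy synchronous scan is gone; always resolve the
+            // background eviction scan here.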
+            mApp.getBucketManager().resolveBackgroundEvictionScan(
+                ltxEvictions, lh.ledgerSeq, keys);
 
             if (ledgerCloseMeta)
             {
@@ -1725,8 +1705,8 @@ LedgerManagerImpl::transferLedgerEntriesToBucketList(
     ltx.getAllEntries(initEntries, liveEntries, deadEntries);
     if (blEnabled)
     {
-        mApp.getBucketManager().addBatch(mApp, lh, initEntries, liveEntries,
-                                         deadEntries);
+        mApp.getBucketManager().addLiveBatch(mApp, lh, initEntries, liveEntries,
+                                             deadEntries);
     }
 }
 
diff --git a/src/ledger/LedgerManagerImpl.h b/src/ledger/LedgerManagerImpl.h
index a5b1ae860a..4217b964de 100644
--- a/src/ledger/LedgerManagerImpl.h
+++ b/src/ledger/LedgerManagerImpl.h
@@ -181,10 +181,10 @@ class LedgerManagerImpl : public LedgerManager
 
     Database& getDatabase() override;
 
-    void
-    startCatchup(CatchupConfiguration configuration,
-                 std::shared_ptr<HistoryArchive> archive,
-                 std::set<std::shared_ptr<Bucket>> bucketsToRetain) override;
+    void startCatchup(
+        CatchupConfiguration configuration,
+        std::shared_ptr<HistoryArchive> archive,
+        std::set<std::shared_ptr<LiveBucket>> bucketsToRetain) override;
 
     void closeLedger(LedgerCloseData const& ledgerData) override;
     void deleteOldEntries(Database& db, uint32_t ledgerSeq,
diff --git a/src/ledger/LedgerStateSnapshot.cpp b/src/ledger/LedgerStateSnapshot.cpp
index 6f0228884e..10aedf0ed4 100644
--- a/src/ledger/LedgerStateSnapshot.cpp
+++ b/src/ledger/LedgerStateSnapshot.cpp
@@ -164,7 +164,7 @@ LedgerTxnReadOnly::executeWithMaybeInnerSnapshot(
 }
 
 BucketSnapshotState::BucketSnapshotState(BucketManager& bm)
-    : mSnapshot(bm.getSearchableBucketListSnapshot())
+    : mSnapshot(bm.getSearchableLiveBucketListSnapshot())
     , mLedgerHeader(LedgerHeaderWrapper(
           std::make_shared<LedgerHeader>(mSnapshot->getLedgerHeader())))
 {
@@ -223,7 +223,8 @@ LedgerSnapshot::LedgerSnapshot(AbstractLedgerTxn& ltx)
 
 LedgerSnapshot::LedgerSnapshot(Application& app)
 {
-    if (app.getConfig().DEPRECATED_SQL_LEDGER_STATE)
+#ifdef BUILD_TESTS
+    if (app.getConfig().MODE_USES_IN_MEMORY_LEDGER)
     {
         // Legacy read-only SQL transaction
         mLegacyLedgerTxn = std::make_unique<LedgerTxn>(
@@ -232,9 +233,8 @@ LedgerSnapshot::LedgerSnapshot(Application& app)
         mGetter = std::make_unique<LedgerTxnReadOnly>(*mLegacyLedgerTxn);
     }
     else
-    {
+#endif
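+        // Outside of the in-memory test mode, state is read from the
+        // BucketList snapshot.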
         mGetter = std::make_unique<BucketSnapshotState>(app.getBucketManager());
-    }
 }
 
 LedgerHeaderWrapper
diff --git a/src/ledger/LedgerStateSnapshot.h b/src/ledger/LedgerStateSnapshot.h
index 7a57b1c771..dc4f6f76f9 100644
--- a/src/ledger/LedgerStateSnapshot.h
+++ b/src/ledger/LedgerStateSnapshot.h
@@ -105,7 +105,7 @@ class LedgerTxnReadOnly : public AbstractLedgerStateSnapshot
 // A concrete implementation of read-only BucketList snapshot wrapper
 class BucketSnapshotState : public AbstractLedgerStateSnapshot
 {
-    std::shared_ptr<SearchableBucketListSnapshot> mSnapshot;
+    std::shared_ptr<SearchableLiveBucketListSnapshot> mSnapshot;
     // Store a copy of the header from mSnapshot. This is needed for
     // validation flow where for certain validation scenarios the header needs
     // to be modified
diff --git a/src/ledger/LedgerTxn.cpp b/src/ledger/LedgerTxn.cpp
index 2085d7c92c..db669f8f72 100644
--- a/src/ledger/LedgerTxn.cpp
+++ b/src/ledger/LedgerTxn.cpp
@@ -23,6 +23,7 @@
 #include "util/XDRStream.h"
 #include "util/types.h"
 #include "xdr/Stellar-ledger-entries.h"
+#include "xdr/Stellar-types.h"
 #include "xdrpp/marshal.h"
 #include <Tracy.hpp>
 #include <soci.h>
@@ -2009,34 +2010,16 @@ LedgerTxn::Impl::unsealHeader(LedgerTxn& self,
 }
 
 uint64_t
-LedgerTxn::countObjects(LedgerEntryType let) const
+LedgerTxn::countOffers(LedgerRange const& ledgers) const
 {
-    throw std::runtime_error("called countObjects on non-root LedgerTxn");
-}
-
-uint64_t
-LedgerTxn::countObjects(LedgerEntryType let, LedgerRange const& ledgers) const
-{
-    throw std::runtime_error("called countObjects on non-root LedgerTxn");
+    throw std::runtime_error("called countOffers on non-root LedgerTxn");
 }
 
 void
-LedgerTxn::deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const
+LedgerTxn::deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const
 {
     throw std::runtime_error(
-        "called deleteObjectsModifiedOnOrAfterLedger on non-root LedgerTxn");
-}
-
-void
-LedgerTxn::dropAccounts(bool rebuild)
-{
-    throw std::runtime_error("called dropAccounts on non-root LedgerTxn");
-}
-
-void
-LedgerTxn::dropData(bool rebuild)
-{
-    throw std::runtime_error("called dropData on non-root LedgerTxn");
+        "called deleteOffersModifiedOnOrAfterLedger on non-root LedgerTxn");
 }
 
 void
@@ -2045,49 +2028,6 @@ LedgerTxn::dropOffers(bool rebuild)
     throw std::runtime_error("called dropOffers on non-root LedgerTxn");
 }
 
-void
-LedgerTxn::dropTrustLines(bool rebuild)
-{
-    throw std::runtime_error("called dropTrustLines on non-root LedgerTxn");
-}
-
-void
-LedgerTxn::dropClaimableBalances(bool rebuild)
-{
-    throw std::runtime_error(
-        "called dropClaimableBalances on non-root LedgerTxn");
-}
-
-void
-LedgerTxn::dropLiquidityPools(bool rebuild)
-{
-    throw std::runtime_error("called dropLiquidityPools on non-root LedgerTxn");
-}
-
-void
-LedgerTxn::dropContractData(bool rebuild)
-{
-    throw std::runtime_error("called dropContractData on non-root LedgerTxn");
-}
-
-void
-LedgerTxn::dropContractCode(bool rebuild)
-{
-    throw std::runtime_error("called dropContractCode on non-root LedgerTxn");
-}
-
-void
-LedgerTxn::dropConfigSettings(bool rebuild)
-{
-    throw std::runtime_error("called dropConfigSettings on non-root LedgerTxn");
-}
-
-void
-LedgerTxn::dropTTL(bool rebuild)
-{
-    throw std::runtime_error("called dropTTL on non-root LedgerTxn");
-}
-
 double
 LedgerTxn::getPrefetchHitRate() const
 {
@@ -2617,8 +2557,7 @@ accum(EntryIterator const& iter, std::vector<EntryIterator>& upsertBuffer,
 
 // Return true only if something is actually accumulated and not skipped over
 bool
-BulkLedgerEntryChangeAccumulator::accumulate(EntryIterator const& iter,
-                                             bool bucketListDBEnabled)
+BulkLedgerEntryChangeAccumulator::accumulate(EntryIterator const& iter)
 {
-    // Right now, only LEDGER_ENTRY are recorded in the SQL database
+    // Right now, only entries of type LEDGER_ENTRY are recorded in the SQL
+    // database
     if (iter.key().type() != InternalLedgerEntryType::LEDGER_ENTRY)
@@ -2626,55 +2565,15 @@ BulkLedgerEntryChangeAccumulator::accumulate(EntryIterator const& iter,
         return false;
     }
 
-    // Don't accumulate entry types that are supported by BucketListDB when it
-    // is enabled
+    // Don't accumulate entry types that are supported by BucketListDB
     auto type = iter.key().ledgerKey().type();
-    if (bucketListDBEnabled && !BucketIndex::typeNotSupported(type))
+    if (!BucketIndex::typeNotSupported(type))
     {
         return false;
     }
 
-    switch (type)
-    {
-    case ACCOUNT:
-        accum(iter, mAccountsToUpsert, mAccountsToDelete);
-        break;
-    case TRUSTLINE:
-        accum(iter, mTrustLinesToUpsert, mTrustLinesToDelete);
-        break;
-    case OFFER:
-        accum(iter, mOffersToUpsert, mOffersToDelete);
-        break;
-    case DATA:
-        accum(iter, mAccountDataToUpsert, mAccountDataToDelete);
-        break;
-    case CLAIMABLE_BALANCE:
-        accum(iter, mClaimableBalanceToUpsert, mClaimableBalanceToDelete);
-        break;
-    case LIQUIDITY_POOL:
-        accum(iter, mLiquidityPoolToUpsert, mLiquidityPoolToDelete);
-        break;
-    case CONTRACT_DATA:
-        accum(iter, mContractDataToUpsert, mContractDataToDelete);
-        break;
-    case CONTRACT_CODE:
-        accum(iter, mContractCodeToUpsert, mContractCodeToDelete);
-        break;
-    case CONFIG_SETTING:
-    {
-        // Configuration can not be deleted.
-        releaseAssert(iter.entryExists());
-        std::vector<EntryIterator> emptyEntries;
-        accum(iter, mConfigSettingsToUpsert, emptyEntries);
-        break;
-    }
-    case TTL:
-        accum(iter, mTTLToUpsert, mTTLToDelete);
-        break;
-    default:
-        abort();
-    }
-
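+    // Offers are the only entry type not handled by BucketListDB, so anything
+    // that reaches this point must be an offer destined for SQL.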
+    releaseAssertOrThrow(type == OFFER);
+    accum(iter, mOffersToUpsert, mOffersToDelete);
     return true;
 }
 
@@ -2683,30 +2582,7 @@ LedgerTxnRoot::Impl::bulkApply(BulkLedgerEntryChangeAccumulator& bleca,
                                size_t bufferThreshold,
                                LedgerTxnConsistency cons)
 {
-    auto& upsertAccounts = bleca.getAccountsToUpsert();
-    if (upsertAccounts.size() > bufferThreshold)
-    {
-        bulkUpsertAccounts(upsertAccounts);
-        upsertAccounts.clear();
-    }
-    auto& deleteAccounts = bleca.getAccountsToDelete();
-    if (deleteAccounts.size() > bufferThreshold)
-    {
-        bulkDeleteAccounts(deleteAccounts, cons);
-        deleteAccounts.clear();
-    }
-    auto& upsertTrustLines = bleca.getTrustLinesToUpsert();
-    if (upsertTrustLines.size() > bufferThreshold)
-    {
-        bulkUpsertTrustLines(upsertTrustLines);
-        upsertTrustLines.clear();
-    }
-    auto& deleteTrustLines = bleca.getTrustLinesToDelete();
-    if (deleteTrustLines.size() > bufferThreshold)
-    {
-        bulkDeleteTrustLines(deleteTrustLines, cons);
-        deleteTrustLines.clear();
-    }
     auto& upsertOffers = bleca.getOffersToUpsert();
     if (upsertOffers.size() > bufferThreshold)
     {
@@ -2719,87 +2595,6 @@ LedgerTxnRoot::Impl::bulkApply(BulkLedgerEntryChangeAccumulator& bleca,
         bulkDeleteOffers(deleteOffers, cons);
         deleteOffers.clear();
     }
-    auto& upsertAccountData = bleca.getAccountDataToUpsert();
-    if (upsertAccountData.size() > bufferThreshold)
-    {
-        bulkUpsertAccountData(upsertAccountData);
-        upsertAccountData.clear();
-    }
-    auto& deleteAccountData = bleca.getAccountDataToDelete();
-    if (deleteAccountData.size() > bufferThreshold)
-    {
-        bulkDeleteAccountData(deleteAccountData, cons);
-        deleteAccountData.clear();
-    }
-    auto& upsertClaimableBalance = bleca.getClaimableBalanceToUpsert();
-    if (upsertClaimableBalance.size() > bufferThreshold)
-    {
-        bulkUpsertClaimableBalance(upsertClaimableBalance);
-        upsertClaimableBalance.clear();
-    }
-    auto& deleteClaimableBalance = bleca.getClaimableBalanceToDelete();
-    if (deleteClaimableBalance.size() > bufferThreshold)
-    {
-        bulkDeleteClaimableBalance(deleteClaimableBalance, cons);
-        deleteClaimableBalance.clear();
-    }
-    auto& upsertLiquidityPool = bleca.getLiquidityPoolToUpsert();
-    if (upsertLiquidityPool.size() > bufferThreshold)
-    {
-        bulkUpsertLiquidityPool(upsertLiquidityPool);
-        upsertLiquidityPool.clear();
-    }
-    auto& deleteLiquidityPool = bleca.getLiquidityPoolToDelete();
-    if (deleteLiquidityPool.size() > bufferThreshold)
-    {
-        bulkDeleteLiquidityPool(deleteLiquidityPool, cons);
-        deleteLiquidityPool.clear();
-    }
-    auto& upsertConfigSettings = bleca.getConfigSettingsToUpsert();
-    if (upsertConfigSettings.size() > bufferThreshold)
-    {
-        bulkUpsertConfigSettings(upsertConfigSettings);
-        upsertConfigSettings.clear();
-    }
-    auto& upsertContractData = bleca.getContractDataToUpsert();
-    if (upsertContractData.size() > bufferThreshold)
-    {
-        bulkUpsertContractData(upsertContractData);
-        upsertContractData.clear();
-    }
-    auto& deleteContractData = bleca.getContractDataToDelete();
-    if (deleteContractData.size() > bufferThreshold)
-    {
-        bulkDeleteContractData(deleteContractData, cons);
-        deleteContractData.clear();
-    }
-
-    auto& upsertContractCode = bleca.getContractCodeToUpsert();
-    if (upsertContractCode.size() > bufferThreshold)
-    {
-        bulkUpsertContractCode(upsertContractCode);
-        upsertContractCode.clear();
-    }
-    auto& deleteContractCode = bleca.getContractCodeToDelete();
-    if (deleteContractCode.size() > bufferThreshold)
-    {
-        bulkDeleteContractCode(deleteContractCode, cons);
-        deleteContractCode.clear();
-    }
-
-    auto& upsertTTL = bleca.getTTLToUpsert();
-    if (upsertTTL.size() > bufferThreshold)
-    {
-        bulkUpsertTTL(upsertTTL);
-        upsertTTL.clear();
-    }
-
-    auto& deleteTTL = bleca.getTTLToDelete();
-    if (deleteTTL.size() > bufferThreshold)
-    {
-        bulkDeleteTTL(deleteTTL, cons);
-        deleteTTL.clear();
-    }
 }
 
 void
@@ -2821,14 +2616,13 @@ LedgerTxnRoot::Impl::commitChild(EntryIterator iter,
     // guarantee, so use std::unique_ptr<...>::swap to achieve it
     auto childHeader = std::make_unique<LedgerHeader>(mChild->getHeader());
 
-    auto bucketListDBEnabled = mApp.getConfig().isUsingBucketListDB();
     auto bleca = BulkLedgerEntryChangeAccumulator();
     [[maybe_unused]] int64_t counter{0};
     try
     {
         while ((bool)iter)
         {
-            if (bleca.accumulate(iter, bucketListDBEnabled))
+            if (bleca.accumulate(iter))
             {
                 ++counter;
             }
@@ -2907,40 +2701,18 @@ LedgerTxnRoot::Impl::tableFromLedgerEntryType(LedgerEntryType let)
 }
 
 uint64_t
-LedgerTxnRoot::countObjects(LedgerEntryType let) const
+LedgerTxnRoot::countOffers(LedgerRange const& ledgers) const
 {
-    return mImpl->countObjects(let);
+    return mImpl->countOffers(ledgers);
 }
 
 uint64_t
-LedgerTxnRoot::Impl::countObjects(LedgerEntryType let) const
+LedgerTxnRoot::Impl::countOffers(LedgerRange const& ledgers) const
 {
     using namespace soci;
     throwIfChild();
 
-    std::string query =
-        "SELECT COUNT(*) FROM " + tableFromLedgerEntryType(let) + ";";
-    uint64_t count = 0;
-    mApp.getDatabase().getSession() << query, into(count);
-    return count;
-}
-
-uint64_t
-LedgerTxnRoot::countObjects(LedgerEntryType let,
-                            LedgerRange const& ledgers) const
-{
-    return mImpl->countObjects(let, ledgers);
-}
-
-uint64_t
-LedgerTxnRoot::Impl::countObjects(LedgerEntryType let,
-                                  LedgerRange const& ledgers) const
-{
-    using namespace soci;
-    throwIfChild();
-
-    std::string query = "SELECT COUNT(*) FROM " +
-                        tableFromLedgerEntryType(let) +
+    std::string query = "SELECT COUNT(*) FROM offers"
                         " WHERE lastmodified >= :v1 AND lastmodified < :v2;";
     uint64_t count = 0;
     int first = static_cast<int>(ledgers.mFirst);
@@ -2951,38 +2723,22 @@ LedgerTxnRoot::Impl::countObjects(LedgerEntryType let,
 }
 
 void
-LedgerTxnRoot::deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const
+LedgerTxnRoot::deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const
 {
-    return mImpl->deleteObjectsModifiedOnOrAfterLedger(ledger);
+    return mImpl->deleteOffersModifiedOnOrAfterLedger(ledger);
 }
 
 void
-LedgerTxnRoot::Impl::deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const
+LedgerTxnRoot::Impl::deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const
 {
     using namespace soci;
     throwIfChild();
     mEntryCache.clear();
     mBestOffers.clear();
 
-    for (auto let : xdr::xdr_traits<LedgerEntryType>::enum_values())
-    {
-        LedgerEntryType t = static_cast<LedgerEntryType>(let);
-        std::string query = "DELETE FROM " + tableFromLedgerEntryType(t) +
-                            " WHERE lastmodified >= :v1";
-        mApp.getDatabase().getSession() << query, use(ledger);
-    }
-}
-
-void
-LedgerTxnRoot::dropAccounts(bool rebuild)
-{
-    mImpl->dropAccounts(rebuild);
-}
-
-void
-LedgerTxnRoot::dropData(bool rebuild)
-{
-    mImpl->dropData(rebuild);
+    std::string query = "DELETE FROM " + tableFromLedgerEntryType(OFFER) +
+                        " WHERE lastmodified >= :v1";
+    mApp.getDatabase().getSession() << query, use(ledger);
 }
 
 void
@@ -2991,48 +2747,6 @@ LedgerTxnRoot::dropOffers(bool rebuild)
     mImpl->dropOffers(rebuild);
 }
 
-void
-LedgerTxnRoot::dropTrustLines(bool rebuild)
-{
-    mImpl->dropTrustLines(rebuild);
-}
-
-void
-LedgerTxnRoot::dropClaimableBalances(bool rebuild)
-{
-    mImpl->dropClaimableBalances(rebuild);
-}
-
-void
-LedgerTxnRoot::dropLiquidityPools(bool rebuild)
-{
-    mImpl->dropLiquidityPools(rebuild);
-}
-
-void
-LedgerTxnRoot::dropContractData(bool rebuild)
-{
-    mImpl->dropContractData(rebuild);
-}
-
-void
-LedgerTxnRoot::dropContractCode(bool rebuild)
-{
-    mImpl->dropContractCode(rebuild);
-}
-
-void
-LedgerTxnRoot::dropConfigSettings(bool rebuild)
-{
-    mImpl->dropConfigSettings(rebuild);
-}
-
-void
-LedgerTxnRoot::dropTTL(bool rebuild)
-{
-    mImpl->dropTTL(rebuild);
-}
-
 uint32_t
 LedgerTxnRoot::prefetchClassic(UnorderedSet<LedgerKey> const& keys)
 {
@@ -3096,128 +2810,14 @@ LedgerTxnRoot::Impl::prefetchInternal(UnorderedSet<LedgerKey> const& keys,
         }
     };
 
-    if (mApp.getConfig().isUsingBucketListDB())
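+    // Prefetching is served entirely by the searchable live BucketList
+    // snapshot; the per-type SQL bulk-load paths are no longer needed.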
+    LedgerKeySet keysToSearch;
+    for (auto const& key : keys)
     {
-        LedgerKeySet keysToSearch;
-        for (auto const& key : keys)
-        {
-            insertIfNotLoaded(keysToSearch, key);
-        }
-        auto blLoad = getSearchableBucketListSnapshot().loadKeysWithLimits(
-            keysToSearch, lkMeter);
-        cacheResult(populateLoadedEntries(keysToSearch, blLoad, lkMeter));
-    }
-    else
-    {
-        UnorderedSet<LedgerKey> accounts;
-        UnorderedSet<LedgerKey> offers;
-        UnorderedSet<LedgerKey> trustlines;
-        UnorderedSet<LedgerKey> data;
-        UnorderedSet<LedgerKey> claimablebalance;
-        UnorderedSet<LedgerKey> liquiditypool;
-        UnorderedSet<LedgerKey> contractdata;
-        UnorderedSet<LedgerKey> configSettings;
-        UnorderedSet<LedgerKey> contractCode;
-        UnorderedSet<LedgerKey> ttl;
-
-        for (auto const& key : keys)
-        {
-            switch (key.type())
-            {
-            case ACCOUNT:
-                insertIfNotLoaded(accounts, key);
-                if (accounts.size() == mBulkLoadBatchSize)
-                {
-                    cacheResult(bulkLoadAccounts(accounts));
-                    accounts.clear();
-                }
-                break;
-            case OFFER:
-                insertIfNotLoaded(offers, key);
-                if (offers.size() == mBulkLoadBatchSize)
-                {
-                    cacheResult(bulkLoadOffers(offers));
-                    offers.clear();
-                }
-                break;
-            case TRUSTLINE:
-                insertIfNotLoaded(trustlines, key);
-                if (trustlines.size() == mBulkLoadBatchSize)
-                {
-                    cacheResult(bulkLoadTrustLines(trustlines));
-                    trustlines.clear();
-                }
-                break;
-            case DATA:
-                insertIfNotLoaded(data, key);
-                if (data.size() == mBulkLoadBatchSize)
-                {
-                    cacheResult(bulkLoadData(data));
-                    data.clear();
-                }
-                break;
-            case CLAIMABLE_BALANCE:
-                insertIfNotLoaded(claimablebalance, key);
-                if (claimablebalance.size() == mBulkLoadBatchSize)
-                {
-                    cacheResult(bulkLoadClaimableBalance(claimablebalance));
-                    claimablebalance.clear();
-                }
-                break;
-            case LIQUIDITY_POOL:
-                insertIfNotLoaded(liquiditypool, key);
-                if (liquiditypool.size() == mBulkLoadBatchSize)
-                {
-                    cacheResult(bulkLoadLiquidityPool(liquiditypool));
-                    liquiditypool.clear();
-                }
-                break;
-            case CONTRACT_DATA:
-                insertIfNotLoaded(contractdata, key);
-                if (contractdata.size() == mBulkLoadBatchSize)
-                {
-                    cacheResult(bulkLoadContractData(contractdata));
-                    contractdata.clear();
-                }
-                break;
-            case CONTRACT_CODE:
-                insertIfNotLoaded(contractCode, key);
-                if (contractCode.size() == mBulkLoadBatchSize)
-                {
-                    cacheResult(bulkLoadContractCode(contractCode));
-                    contractCode.clear();
-                }
-                break;
-            case CONFIG_SETTING:
-                insertIfNotLoaded(configSettings, key);
-                if (configSettings.size() == mBulkLoadBatchSize)
-                {
-                    cacheResult(bulkLoadConfigSettings(configSettings));
-                    configSettings.clear();
-                }
-                break;
-            case TTL:
-                insertIfNotLoaded(ttl, key);
-                if (ttl.size() == mBulkLoadBatchSize)
-                {
-                    cacheResult(bulkLoadTTL(ttl));
-                    ttl.clear();
-                }
-            }
-        }
-
-        //  Prefetch whatever is remaining
-        cacheResult(bulkLoadAccounts(accounts));
-        cacheResult(bulkLoadOffers(offers));
-        cacheResult(bulkLoadTrustLines(trustlines));
-        cacheResult(bulkLoadData(data));
-        cacheResult(bulkLoadClaimableBalance(claimablebalance));
-        cacheResult(bulkLoadLiquidityPool(liquiditypool));
-        cacheResult(bulkLoadConfigSettings(configSettings));
-        cacheResult(bulkLoadContractData(contractdata));
-        cacheResult(bulkLoadContractCode(contractCode));
-        cacheResult(bulkLoadTTL(ttl));
+        insertIfNotLoaded(keysToSearch, key);
     }
+    auto blLoad = getSearchableLiveBucketListSnapshot().loadKeysWithLimits(
+        keysToSearch, lkMeter);
+    cacheResult(populateLoadedEntries(keysToSearch, blLoad, lkMeter));
 
     return total;
 }
@@ -3486,15 +3086,15 @@ LedgerTxnRoot::Impl::areEntriesMissingInCacheForOffer(OfferEntry const& oe)
     return false;
 }
 
-SearchableBucketListSnapshot&
-LedgerTxnRoot::Impl::getSearchableBucketListSnapshot() const
+SearchableLiveBucketListSnapshot&
+LedgerTxnRoot::Impl::getSearchableLiveBucketListSnapshot() const
 {
-    releaseAssert(mApp.getConfig().isUsingBucketListDB());
     if (!mSearchableBucketListSnapshot)
     {
-        mSearchableBucketListSnapshot = mApp.getBucketManager()
-                                            .getBucketSnapshotManager()
-                                            .copySearchableBucketListSnapshot();
+        mSearchableBucketListSnapshot =
+            mApp.getBucketManager()
+                .getBucketSnapshotManager()
+                .copySearchableLiveBucketListSnapshot();
     }
 
     return *mSearchableBucketListSnapshot;
@@ -3632,17 +3232,9 @@ LedgerTxnRoot::Impl::getPoolShareTrustLinesByAccountAndAsset(
     std::vector<LedgerEntry> trustLines;
     try
     {
-        if (mApp.getConfig().isUsingBucketListDB())
-        {
-            trustLines =
-                getSearchableBucketListSnapshot()
-                    .loadPoolShareTrustLinesByAccountAndAsset(account, asset);
-        }
-        else
-        {
-            trustLines =
-                loadPoolShareTrustLinesByAccountAndAsset(account, asset);
-        }
+        trustLines =
+            getSearchableLiveBucketListSnapshot()
+                .loadPoolShareTrustLinesByAccountAndAsset(account, asset);
     }
     catch (NonSociRelatedException&)
     {
@@ -3696,15 +3288,8 @@ LedgerTxnRoot::Impl::getInflationWinners(size_t maxWinners, int64_t minVotes)
 {
     try
     {
-        if (mApp.getConfig().isUsingBucketListDB())
-        {
-            return getSearchableBucketListSnapshot().loadInflationWinners(
-                maxWinners, minVotes);
-        }
-        else
-        {
-            return loadInflationWinners(maxWinners, minVotes);
-        }
+        return getSearchableLiveBucketListSnapshot().loadInflationWinners(
+            maxWinners, minVotes);
     }
     catch (std::exception& e)
     {
@@ -3752,47 +3337,13 @@ LedgerTxnRoot::Impl::getNewestVersion(InternalLedgerKey const& gkey) const
     std::shared_ptr<LedgerEntry const> entry;
     try
     {
-        if (mApp.getConfig().isUsingBucketListDB() && key.type() != OFFER)
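+        // Offers remain SQL-backed; every other entry type is served from the
+        // live BucketList snapshot.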
+        if (key.type() != OFFER)
         {
-            entry = getSearchableBucketListSnapshot().load(key);
+            entry = getSearchableLiveBucketListSnapshot().load(key);
         }
         else
         {
-            switch (key.type())
-            {
-            case ACCOUNT:
-                entry = loadAccount(key);
-                break;
-            case DATA:
-                entry = loadData(key);
-                break;
-            case OFFER:
-                entry = loadOffer(key);
-                break;
-            case TRUSTLINE:
-                entry = loadTrustLine(key);
-                break;
-            case CLAIMABLE_BALANCE:
-                entry = loadClaimableBalance(key);
-                break;
-            case LIQUIDITY_POOL:
-                entry = loadLiquidityPool(key);
-                break;
-            case CONTRACT_DATA:
-                entry = loadContractData(key);
-                break;
-            case CONTRACT_CODE:
-                entry = loadContractCode(key);
-                break;
-            case CONFIG_SETTING:
-                entry = loadConfigSetting(key);
-                break;
-            case TTL:
-                entry = loadTTL(key);
-                break;
-            default:
-                throw std::runtime_error("Unknown key type");
-            }
+            entry = loadOffer(key);
         }
     }
     catch (NonSociRelatedException&)
diff --git a/src/ledger/LedgerTxn.h b/src/ledger/LedgerTxn.h
index 6e755f651e..8da6dd3be9 100644
--- a/src/ledger/LedgerTxn.h
+++ b/src/ledger/LedgerTxn.h
@@ -463,61 +463,19 @@ class AbstractLedgerTxnParent
     virtual std::shared_ptr<InternalLedgerEntry const>
     getNewestVersion(InternalLedgerKey const& key) const = 0;
 
-    // Return the count of the number of ledger objects of type `let`. Will
-    // throw when called on anything other than a (real or stub) root LedgerTxn.
-    virtual uint64_t countObjects(LedgerEntryType let) const = 0;
-
-    // Return the count of the number of ledger objects of type `let` within
+    // Return the count of offer objects within the
     // range of ledgers `ledgers`. Will throw when called on anything other than
     // a (real or stub) root LedgerTxn.
-    virtual uint64_t countObjects(LedgerEntryType let,
-                                  LedgerRange const& ledgers) const = 0;
+    virtual uint64_t countOffers(LedgerRange const& ledgers) const = 0;
 
-    // Delete all ledger entries modified on-or-after `ledger`. Will throw
+    // Delete all offer entries modified on-or-after `ledger`. Will throw
     // when called on anything other than a (real or stub) root LedgerTxn.
-    virtual void
-    deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const = 0;
-
-    // Delete all account ledger entries in the database. Will throw when called
-    // on anything other than a (real or stub) root LedgerTxn.
-    virtual void dropAccounts(bool rebuild) = 0;
-
-    // Delete all account-data ledger entries. Will throw when called on
-    // anything other than a (real or stub) root LedgerTxn.
-    virtual void dropData(bool rebuild) = 0;
+    virtual void deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const = 0;
 
     // Delete all offer ledger entries. Will throw when called on anything other
     // than a (real or stub) root LedgerTxn.
     virtual void dropOffers(bool rebuild) = 0;
 
-    // Delete all trustline ledger entries. Will throw when called on anything
-    // other than a (real or stub) root LedgerTxn.
-    virtual void dropTrustLines(bool rebuild) = 0;
-
-    // Delete all claimable balance ledger entries. Will throw when called on
-    // anything other than a (real or stub) root LedgerTxn.
-    virtual void dropClaimableBalances(bool rebuild) = 0;
-
-    // Delete all liquidity pool ledger entries. Will throw when called on
-    // anything other than a (real or stub) root LedgerTxn.
-    virtual void dropLiquidityPools(bool rebuild) = 0;
-
-    // Delete all contract data ledger entries. Will throw when called on
-    // anything other than a (real or stub) root LedgerTxn.
-    virtual void dropContractData(bool rebuild) = 0;
-
-    // Delete all contract code ledger entries. Will throw when called on
-    // anything other than a (real or stub) root LedgerTxn.
-    virtual void dropContractCode(bool rebuild) = 0;
-
-    // Delete all config setting ledger entries. Will throw when called on
-    // anything other than a (real or stub) root LedgerTxn.
-    virtual void dropConfigSettings(bool rebuild) = 0;
-
-    // Delete all ttl ledger entries. Will throw when called on
-    // anything other than a (real or stub) root LedgerTxn.
-    virtual void dropTTL(bool rebuild) = 0;
-
     // Return the current cache hit rate for prefetched ledger entries, as a
     // fraction from 0.0 to 1.0. Will throw when called on anything other than a
     // (real or stub) root LedgerTxn.
@@ -815,20 +773,9 @@ class LedgerTxn : public AbstractLedgerTxn
 
     void unsealHeader(std::function<void(LedgerHeader&)> f) override;
 
-    uint64_t countObjects(LedgerEntryType let) const override;
-    uint64_t countObjects(LedgerEntryType let,
-                          LedgerRange const& ledgers) const override;
-    void deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const override;
-    void dropAccounts(bool rebuild) override;
-    void dropData(bool rebuild) override;
+    uint64_t countOffers(LedgerRange const& ledgers) const override;
+    void deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const override;
     void dropOffers(bool rebuild) override;
-    void dropTrustLines(bool rebuild) override;
-    void dropClaimableBalances(bool rebuild) override;
-    void dropLiquidityPools(bool rebuild) override;
-    void dropContractData(bool rebuild) override;
-    void dropContractCode(bool rebuild) override;
-    void dropConfigSettings(bool rebuild) override;
-    void dropTTL(bool rebuild) override;
 
     double getPrefetchHitRate() const override;
     uint32_t prefetchClassic(UnorderedSet<LedgerKey> const& keys) override;
@@ -879,22 +826,11 @@ class LedgerTxnRoot : public AbstractLedgerTxnParent
     void commitChild(EntryIterator iter,
                      LedgerTxnConsistency cons) noexcept override;
 
-    uint64_t countObjects(LedgerEntryType let) const override;
-    uint64_t countObjects(LedgerEntryType let,
-                          LedgerRange const& ledgers) const override;
+    uint64_t countOffers(LedgerRange const& ledgers) const override;
 
-    void deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const override;
+    void deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const override;
 
-    void dropAccounts(bool rebuild) override;
-    void dropData(bool rebuild) override;
     void dropOffers(bool rebuild) override;
-    void dropTrustLines(bool rebuild) override;
-    void dropClaimableBalances(bool rebuild) override;
-    void dropLiquidityPools(bool rebuild) override;
-    void dropContractData(bool rebuild) override;
-    void dropContractCode(bool rebuild) override;
-    void dropConfigSettings(bool rebuild) override;
-    void dropTTL(bool rebuild) override;
 
 #ifdef BUILD_TESTS
     void resetForFuzzer() override;
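The narrowed interface above drops the per-type `countObjects`/`drop*` family in favor of offer-only equivalents. A minimal sketch of what a caller migration looks like, assuming an already-constructed root transaction; the helper name and call site are illustrative, not part of this patch:

```cpp
#include "ledger/LedgerTxn.h"

// Illustrative only: a caller that previously used the generic
// countObjects(OFFER, range) / deleteObjectsModifiedOnOrAfterLedger(seq)
// pair now goes through the offer-specific methods kept by this patch.
void
pruneOffersSince(stellar::AbstractLedgerTxnParent& root, uint32_t ledgerSeq,
                 stellar::LedgerRange const& range)
{
    uint64_t nOffers = root.countOffers(range);
    if (nOffers > 0)
    {
        root.deleteOffersModifiedOnOrAfterLedger(ledgerSeq);
    }
}
```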
diff --git a/src/ledger/LedgerTxnAccountSQL.cpp b/src/ledger/LedgerTxnAccountSQL.cpp
deleted file mode 100644
index db51158f65..0000000000
--- a/src/ledger/LedgerTxnAccountSQL.cpp
+++ /dev/null
@@ -1,678 +0,0 @@
-// Copyright 2018 Stellar Development Foundation and contributors. Licensed
-// under the Apache License, Version 2.0. See the COPYING file at the root
-// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
-
-#include "crypto/KeyUtils.h"
-#include "crypto/SecretKey.h"
-#include "crypto/SignerKey.h"
-#include "database/Database.h"
-#include "database/DatabaseTypeSpecificOperation.h"
-#include "ledger/LedgerTxnImpl.h"
-#include "ledger/LedgerTypeUtils.h"
-#include "main/Application.h"
-#include "util/Decoder.h"
-#include "util/GlobalChecks.h"
-#include "util/Logging.h"
-#include "util/XDROperators.h"
-#include "util/types.h"
-#include "xdrpp/marshal.h"
-#include <Tracy.hpp>
-
-namespace stellar
-{
-
-std::shared_ptr<LedgerEntry const>
-LedgerTxnRoot::Impl::loadAccount(LedgerKey const& key) const
-{
-    ZoneScoped;
-    std::string actIDStrKey = KeyUtils::toStrKey(key.account().accountID);
-
-    std::string inflationDest, homeDomain, thresholds, signers;
-    soci::indicator inflationDestInd, signersInd;
-    std::string extensionStr;
-    soci::indicator extensionInd;
-    std::string ledgerExtStr;
-    soci::indicator ledgerExtInd;
-
-    LedgerEntry le;
-    le.data.type(ACCOUNT);
-    auto& account = le.data.account();
-
-    auto prep = mApp.getDatabase().getPreparedStatement(
-        "SELECT balance, seqnum, numsubentries, "
-        "inflationdest, homedomain, thresholds, "
-        "flags, lastmodified, "
-        "signers, extension, "
-        "ledgerext FROM accounts WHERE accountid=:v1");
-    auto& st = prep.statement();
-    st.exchange(soci::into(account.balance));
-    st.exchange(soci::into(account.seqNum));
-    st.exchange(soci::into(account.numSubEntries));
-    st.exchange(soci::into(inflationDest, inflationDestInd));
-    st.exchange(soci::into(homeDomain));
-    st.exchange(soci::into(thresholds));
-    st.exchange(soci::into(account.flags));
-    st.exchange(soci::into(le.lastModifiedLedgerSeq));
-    st.exchange(soci::into(signers, signersInd));
-    st.exchange(soci::into(extensionStr, extensionInd));
-    st.exchange(soci::into(ledgerExtStr, ledgerExtInd));
-    st.exchange(soci::use(actIDStrKey));
-    st.define_and_bind();
-    {
-        auto timer = mApp.getDatabase().getSelectTimer("account");
-        st.execute(true);
-    }
-    if (!st.got_data())
-    {
-        return nullptr;
-    }
-
-    account.accountID = key.account().accountID;
-    decoder::decode_b64(homeDomain, account.homeDomain);
-
-    bn::decode_b64(thresholds.begin(), thresholds.end(),
-                   account.thresholds.begin());
-
-    if (inflationDestInd == soci::i_ok)
-    {
-        account.inflationDest.activate() =
-            KeyUtils::fromStrKey<PublicKey>(inflationDest);
-    }
-
-    if (signersInd == soci::i_ok)
-    {
-        std::vector<uint8_t> signersOpaque;
-        decoder::decode_b64(signers, signersOpaque);
-        xdr::xdr_from_opaque(signersOpaque, account.signers);
-        releaseAssert(
-            std::adjacent_find(account.signers.begin(), account.signers.end(),
-                               [](Signer const& lhs, Signer const& rhs) {
-                                   return !(lhs.key < rhs.key);
-                               }) == account.signers.end());
-    }
-
-    decodeOpaqueXDR(extensionStr, extensionInd, account.ext);
-
-    decodeOpaqueXDR(ledgerExtStr, ledgerExtInd, le.ext);
-
-    return std::make_shared<LedgerEntry const>(std::move(le));
-}
-
-std::vector<InflationWinner>
-LedgerTxnRoot::Impl::loadInflationWinners(size_t maxWinners,
-                                          int64_t minBalance) const
-{
-    InflationWinner w;
-    std::string inflationDest;
-
-    auto prep = mApp.getDatabase().getPreparedStatement(
-        "SELECT sum(balance) AS votes, inflationdest"
-        " FROM accounts WHERE inflationdest IS NOT NULL"
-        " AND balance >= 1000000000 GROUP BY inflationdest"
-        " ORDER BY votes DESC, inflationdest DESC LIMIT :lim");
-    auto& st = prep.statement();
-    st.exchange(soci::into(w.votes));
-    st.exchange(soci::into(inflationDest));
-    st.exchange(soci::use(maxWinners));
-    st.define_and_bind();
-    st.execute(true);
-
-    std::vector<InflationWinner> winners;
-    while (st.got_data())
-    {
-        w.accountID = KeyUtils::fromStrKey<PublicKey>(inflationDest);
-        if (w.votes < minBalance)
-        {
-            break;
-        }
-        winners.push_back(w);
-        st.fetch();
-    }
-    return winners;
-}
-
-class BulkUpsertAccountsOperation : public DatabaseTypeSpecificOperation<void>
-{
-    Database& mDB;
-    std::vector<std::string> mAccountIDs;
-    std::vector<int64_t> mBalances;
-    std::vector<int64_t> mSeqNums;
-    std::vector<int32_t> mSubEntryNums;
-    std::vector<std::string> mInflationDests;
-    std::vector<soci::indicator> mInflationDestInds;
-    std::vector<int32_t> mFlags;
-    std::vector<std::string> mHomeDomains;
-    std::vector<std::string> mThresholds;
-    std::vector<std::string> mSigners;
-    std::vector<soci::indicator> mSignerInds;
-    std::vector<int32_t> mLastModifieds;
-    std::vector<std::string> mExtensions;
-    std::vector<soci::indicator> mExtensionInds;
-    std::vector<std::string> mLedgerExtensions;
-
-  public:
-    BulkUpsertAccountsOperation(Database& DB,
-                                std::vector<EntryIterator> const& entries)
-        : mDB(DB)
-    {
-        mAccountIDs.reserve(entries.size());
-        mBalances.reserve(entries.size());
-        mSeqNums.reserve(entries.size());
-        mSubEntryNums.reserve(entries.size());
-        mInflationDests.reserve(entries.size());
-        mInflationDestInds.reserve(entries.size());
-        mFlags.reserve(entries.size());
-        mHomeDomains.reserve(entries.size());
-        mThresholds.reserve(entries.size());
-        mSigners.reserve(entries.size());
-        mSignerInds.reserve(entries.size());
-        mLastModifieds.reserve(entries.size());
-        mExtensions.reserve(entries.size());
-        mExtensionInds.reserve(entries.size());
-        mLedgerExtensions.reserve(entries.size());
-
-        for (auto const& e : entries)
-        {
-            releaseAssert(e.entryExists());
-            releaseAssert(e.entry().type() ==
-                          InternalLedgerEntryType::LEDGER_ENTRY);
-            auto const& le = e.entry().ledgerEntry();
-            releaseAssert(le.data.type() == ACCOUNT);
-            auto const& account = le.data.account();
-            mAccountIDs.emplace_back(KeyUtils::toStrKey(account.accountID));
-            mBalances.emplace_back(account.balance);
-            mSeqNums.emplace_back(account.seqNum);
-            mSubEntryNums.emplace_back(unsignedToSigned(account.numSubEntries));
-
-            if (account.inflationDest)
-            {
-                mInflationDests.emplace_back(
-                    KeyUtils::toStrKey(*account.inflationDest));
-                mInflationDestInds.emplace_back(soci::i_ok);
-            }
-            else
-            {
-                mInflationDests.emplace_back("");
-                mInflationDestInds.emplace_back(soci::i_null);
-            }
-            mFlags.emplace_back(unsignedToSigned(account.flags));
-            mHomeDomains.emplace_back(decoder::encode_b64(account.homeDomain));
-            mThresholds.emplace_back(decoder::encode_b64(account.thresholds));
-            if (account.signers.empty())
-            {
-                mSigners.emplace_back("");
-                mSignerInds.emplace_back(soci::i_null);
-            }
-            else
-            {
-                mSigners.emplace_back(
-                    decoder::encode_b64(xdr::xdr_to_opaque(account.signers)));
-                mSignerInds.emplace_back(soci::i_ok);
-            }
-            mLastModifieds.emplace_back(
-                unsignedToSigned(le.lastModifiedLedgerSeq));
-
-            if (account.ext.v() >= 1)
-            {
-                mExtensions.emplace_back(
-                    decoder::encode_b64(xdr::xdr_to_opaque(account.ext)));
-                mExtensionInds.emplace_back(soci::i_ok);
-            }
-            else
-            {
-                mExtensions.emplace_back("");
-                mExtensionInds.emplace_back(soci::i_null);
-            }
-
-            mLedgerExtensions.emplace_back(
-                decoder::encode_b64(xdr::xdr_to_opaque(le.ext)));
-        }
-    }
-
-    void
-    doSociGenericOperation()
-    {
-        std::string sql =
-            "INSERT INTO accounts ( "
-            "accountid, balance, seqnum, numsubentries, inflationdest,"
-            "homedomain, thresholds, signers, flags, lastmodified, "
-            "extension, ledgerext "
-            ") VALUES ( "
-            ":id, :v1, :v2, :v3, :v4, :v5, :v6, :v7, :v8, :v9, :v10, :v11 "
-            ") ON CONFLICT (accountid) DO UPDATE SET "
-            "balance = excluded.balance, "
-            "seqnum = excluded.seqnum, "
-            "numsubentries = excluded.numsubentries, "
-            "inflationdest = excluded.inflationdest, "
-            "homedomain = excluded.homedomain, "
-            "thresholds = excluded.thresholds, "
-            "signers = excluded.signers, "
-            "flags = excluded.flags, "
-            "lastmodified = excluded.lastmodified, "
-            "extension = excluded.extension, "
-            "ledgerext = excluded.ledgerext";
-        auto prep = mDB.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(mAccountIDs));
-        st.exchange(soci::use(mBalances));
-        st.exchange(soci::use(mSeqNums));
-        st.exchange(soci::use(mSubEntryNums));
-        st.exchange(soci::use(mInflationDests, mInflationDestInds));
-        st.exchange(soci::use(mHomeDomains));
-        st.exchange(soci::use(mThresholds));
-        st.exchange(soci::use(mSigners, mSignerInds));
-        st.exchange(soci::use(mFlags));
-        st.exchange(soci::use(mLastModifieds));
-        st.exchange(soci::use(mExtensions, mExtensionInds));
-        st.exchange(soci::use(mLedgerExtensions));
-        st.define_and_bind();
-        {
-            auto timer = mDB.getUpsertTimer("account");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mAccountIDs.size())
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-
-    void
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        doSociGenericOperation();
-    }
-
-#ifdef USE_POSTGRES
-    void
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        std::string strAccountIDs, strBalances, strSeqNums, strSubEntryNums,
-            strInflationDests, strFlags, strHomeDomains, strThresholds,
-            strSigners, strLastModifieds, strExtensions, strLedgerExtensions;
-
-        PGconn* conn = pg->conn_;
-        marshalToPGArray(conn, strAccountIDs, mAccountIDs);
-        marshalToPGArray(conn, strBalances, mBalances);
-        marshalToPGArray(conn, strSeqNums, mSeqNums);
-        marshalToPGArray(conn, strSubEntryNums, mSubEntryNums);
-        marshalToPGArray(conn, strInflationDests, mInflationDests,
-                         &mInflationDestInds);
-        marshalToPGArray(conn, strFlags, mFlags);
-        marshalToPGArray(conn, strHomeDomains, mHomeDomains);
-        marshalToPGArray(conn, strThresholds, mThresholds);
-        marshalToPGArray(conn, strSigners, mSigners, &mSignerInds);
-        marshalToPGArray(conn, strLastModifieds, mLastModifieds);
-        marshalToPGArray(conn, strExtensions, mExtensions, &mExtensionInds);
-        marshalToPGArray(conn, strLedgerExtensions, mLedgerExtensions);
-
-        std::string sql = "WITH r AS (SELECT "
-                          "unnest(:ids::TEXT[]), "
-                          "unnest(:v1::BIGINT[]), "
-                          "unnest(:v2::BIGINT[]), "
-                          "unnest(:v3::INT[]), "
-                          "unnest(:v4::TEXT[]), "
-                          "unnest(:v5::TEXT[]), "
-                          "unnest(:v6::TEXT[]), "
-                          "unnest(:v7::TEXT[]), "
-                          "unnest(:v8::INT[]), "
-                          "unnest(:v9::INT[]), "
-                          "unnest(:v10::TEXT[]), "
-                          "unnest(:v11::TEXT[]) "
-                          ")"
-                          "INSERT INTO accounts ( "
-                          "accountid, balance, seqnum, "
-                          "numsubentries, inflationdest, homedomain, "
-                          "thresholds, signers, "
-                          "flags, lastmodified, extension, "
-                          "ledgerext "
-                          ") SELECT * FROM r "
-                          "ON CONFLICT (accountid) DO UPDATE SET "
-                          "balance = excluded.balance, "
-                          "seqnum = excluded.seqnum, "
-                          "numsubentries = excluded.numsubentries, "
-                          "inflationdest = excluded.inflationdest, "
-                          "homedomain = excluded.homedomain, "
-                          "thresholds = excluded.thresholds, "
-                          "signers = excluded.signers, "
-                          "flags = excluded.flags, "
-                          "lastmodified = excluded.lastmodified, "
-                          "extension = excluded.extension, "
-                          "ledgerext = excluded.ledgerext";
-        auto prep = mDB.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(strAccountIDs));
-        st.exchange(soci::use(strBalances));
-        st.exchange(soci::use(strSeqNums));
-        st.exchange(soci::use(strSubEntryNums));
-        st.exchange(soci::use(strInflationDests));
-        st.exchange(soci::use(strHomeDomains));
-        st.exchange(soci::use(strThresholds));
-        st.exchange(soci::use(strSigners));
-        st.exchange(soci::use(strFlags));
-        st.exchange(soci::use(strLastModifieds));
-        st.exchange(soci::use(strExtensions));
-        st.exchange(soci::use(strLedgerExtensions));
-        st.define_and_bind();
-        {
-            auto timer = mDB.getUpsertTimer("account");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mAccountIDs.size())
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-#endif
-};
-
-class BulkDeleteAccountsOperation : public DatabaseTypeSpecificOperation<void>
-{
-    Database& mDB;
-    LedgerTxnConsistency mCons;
-    std::vector<std::string> mAccountIDs;
-
-  public:
-    BulkDeleteAccountsOperation(Database& DB, LedgerTxnConsistency cons,
-                                std::vector<EntryIterator> const& entries)
-        : mDB(DB), mCons(cons)
-    {
-        for (auto const& e : entries)
-        {
-            releaseAssert(!e.entryExists());
-            releaseAssert(e.key().type() ==
-                          InternalLedgerEntryType::LEDGER_ENTRY);
-            releaseAssert(e.key().ledgerKey().type() == ACCOUNT);
-            auto const& account = e.key().ledgerKey().account();
-            mAccountIDs.emplace_back(KeyUtils::toStrKey(account.accountID));
-        }
-    }
-
-    void
-    doSociGenericOperation()
-    {
-        std::string sql = "DELETE FROM accounts WHERE accountid = :id";
-        auto prep = mDB.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(mAccountIDs));
-        st.define_and_bind();
-        {
-            auto timer = mDB.getDeleteTimer("account");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mAccountIDs.size() &&
-            mCons == LedgerTxnConsistency::EXACT)
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-
-    void
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        doSociGenericOperation();
-    }
-
-#ifdef USE_POSTGRES
-    void
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        PGconn* conn = pg->conn_;
-        std::string strAccountIDs;
-        marshalToPGArray(conn, strAccountIDs, mAccountIDs);
-        std::string sql =
-            "WITH r AS (SELECT unnest(:ids::TEXT[])) "
-            "DELETE FROM accounts WHERE accountid IN (SELECT * FROM r)";
-        auto prep = mDB.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(strAccountIDs));
-        st.define_and_bind();
-        {
-            auto timer = mDB.getDeleteTimer("account");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mAccountIDs.size() &&
-            mCons == LedgerTxnConsistency::EXACT)
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-#endif
-};
-
-void
-LedgerTxnRoot::Impl::bulkUpsertAccounts(
-    std::vector<EntryIterator> const& entries)
-{
-    ZoneScoped;
-    ZoneValue(static_cast<int64_t>(entries.size()));
-    BulkUpsertAccountsOperation op(mApp.getDatabase(), entries);
-    mApp.getDatabase().doDatabaseTypeSpecificOperation(op);
-}
-
-void
-LedgerTxnRoot::Impl::bulkDeleteAccounts(
-    std::vector<EntryIterator> const& entries, LedgerTxnConsistency cons)
-{
-    ZoneScoped;
-    ZoneValue(static_cast<int64_t>(entries.size()));
-    BulkDeleteAccountsOperation op(mApp.getDatabase(), cons, entries);
-    mApp.getDatabase().doDatabaseTypeSpecificOperation(op);
-}
-
-void
-LedgerTxnRoot::Impl::dropAccounts(bool rebuild)
-{
-    throwIfChild();
-    mEntryCache.clear();
-    mBestOffers.clear();
-
-    mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS accounts;";
-    mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS signers;";
-
-    if (rebuild)
-    {
-        std::string coll = mApp.getDatabase().getSimpleCollationClause();
-
-        mApp.getDatabase().getSession()
-            << "CREATE TABLE accounts"
-            << "("
-            << "accountid          VARCHAR(56)  " << coll << " PRIMARY KEY,"
-            << "balance            BIGINT       NOT NULL CHECK (balance >= 0),"
-               "buyingliabilities  BIGINT CHECK (buyingliabilities >= 0),"
-               "sellingliabilities BIGINT CHECK (sellingliabilities >= 0),"
-               "seqnum             BIGINT       NOT NULL,"
-               "numsubentries      INT          NOT NULL CHECK (numsubentries "
-               ">= 0),"
-               "inflationdest      VARCHAR(56),"
-               "homedomain         VARCHAR(44)  NOT NULL,"
-               "thresholds         TEXT         NOT NULL,"
-               "flags              INT          NOT NULL,"
-               "signers            TEXT,"
-               "lastmodified       INT          NOT NULL,"
-               "extension          TEXT,"
-               "ledgerext          TEXT         NOT NULL"
-               ");";
-        if (!mApp.getDatabase().isSqlite())
-        {
-            mApp.getDatabase().getSession() << "ALTER TABLE accounts "
-                                            << "ALTER COLUMN accountid "
-                                            << "TYPE VARCHAR(56) COLLATE \"C\"";
-        }
-    }
-}
-
-class BulkLoadAccountsOperation
-    : public DatabaseTypeSpecificOperation<std::vector<LedgerEntry>>
-{
-    Database& mDb;
-    std::vector<std::string> mAccountIDs;
-
-    std::vector<LedgerEntry>
-    executeAndFetch(soci::statement& st)
-    {
-        std::string accountID, inflationDest, homeDomain, thresholds, signers;
-        int64_t balance;
-        uint64_t seqNum;
-        uint32_t numSubEntries, flags, lastModified;
-        std::string extension;
-        soci::indicator inflationDestInd, signersInd, extensionInd;
-        std::string ledgerExtension;
-        soci::indicator ledgerExtInd;
-
-        st.exchange(soci::into(accountID));
-        st.exchange(soci::into(balance));
-        st.exchange(soci::into(seqNum));
-        st.exchange(soci::into(numSubEntries));
-        st.exchange(soci::into(inflationDest, inflationDestInd));
-        st.exchange(soci::into(homeDomain));
-        st.exchange(soci::into(thresholds));
-        st.exchange(soci::into(flags));
-        st.exchange(soci::into(lastModified));
-        st.exchange(soci::into(extension, extensionInd));
-        st.exchange(soci::into(signers, signersInd));
-        st.exchange(soci::into(ledgerExtension, ledgerExtInd));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getSelectTimer("account");
-            st.execute(true);
-        }
-
-        std::vector<LedgerEntry> res;
-        while (st.got_data())
-        {
-            res.emplace_back();
-            auto& le = res.back();
-            le.data.type(ACCOUNT);
-            auto& ae = le.data.account();
-
-            ae.accountID = KeyUtils::fromStrKey<PublicKey>(accountID);
-            ae.balance = balance;
-            ae.seqNum = seqNum;
-            ae.numSubEntries = numSubEntries;
-
-            if (inflationDestInd == soci::i_ok)
-            {
-                ae.inflationDest.activate() =
-                    KeyUtils::fromStrKey<PublicKey>(inflationDest);
-            }
-
-            decoder::decode_b64(homeDomain, ae.homeDomain);
-
-            bn::decode_b64(thresholds.begin(), thresholds.end(),
-                           ae.thresholds.begin());
-
-            if (inflationDestInd == soci::i_ok)
-            {
-                ae.inflationDest.activate() =
-                    KeyUtils::fromStrKey<PublicKey>(inflationDest);
-            }
-
-            ae.flags = flags;
-            le.lastModifiedLedgerSeq = lastModified;
-
-            decodeOpaqueXDR(extension, extensionInd, ae.ext);
-
-            if (signersInd == soci::i_ok)
-            {
-                std::vector<uint8_t> signersOpaque;
-                decoder::decode_b64(signers, signersOpaque);
-                xdr::xdr_from_opaque(signersOpaque, ae.signers);
-                releaseAssert(std::adjacent_find(
-                                  ae.signers.begin(), ae.signers.end(),
-                                  [](Signer const& lhs, Signer const& rhs) {
-                                      return !(lhs.key < rhs.key);
-                                  }) == ae.signers.end());
-            }
-
-            decodeOpaqueXDR(ledgerExtension, ledgerExtInd, le.ext);
-
-            st.fetch();
-        }
-        return res;
-    }
-
-  public:
-    BulkLoadAccountsOperation(Database& db, UnorderedSet<LedgerKey> const& keys)
-        : mDb(db)
-    {
-        mAccountIDs.reserve(keys.size());
-        for (auto const& k : keys)
-        {
-            releaseAssert(k.type() == ACCOUNT);
-            mAccountIDs.emplace_back(KeyUtils::toStrKey(k.account().accountID));
-        }
-    }
-
-    virtual std::vector<LedgerEntry>
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        std::vector<char const*> accountIDcstrs;
-        accountIDcstrs.reserve(mAccountIDs.size());
-        for (auto const& acc : mAccountIDs)
-        {
-            accountIDcstrs.emplace_back(acc.c_str());
-        }
-
-        std::string sql =
-            "SELECT accountid, balance, seqnum, numsubentries, "
-            "inflationdest, homedomain, thresholds, flags, lastmodified, "
-            "extension, signers, ledgerext"
-            " FROM accounts "
-            "WHERE accountid IN carray(?, ?, 'char*')";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        auto be = prep.statement().get_backend();
-        if (be == nullptr)
-        {
-            throw std::runtime_error("no sql backend");
-        }
-        auto sqliteStatement =
-            dynamic_cast<soci::sqlite3_statement_backend*>(be);
-        auto st = sqliteStatement->stmt_;
-
-        sqlite3_reset(st);
-        sqlite3_bind_pointer(st, 1, accountIDcstrs.data(), "carray", 0);
-        sqlite3_bind_int(st, 2, static_cast<int>(accountIDcstrs.size()));
-        return executeAndFetch(prep.statement());
-    }
-
-#ifdef USE_POSTGRES
-    virtual std::vector<LedgerEntry>
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        std::string strAccountIDs;
-        marshalToPGArray(pg->conn_, strAccountIDs, mAccountIDs);
-
-        std::string sql =
-            "WITH r AS (SELECT unnest(:v1::TEXT[])) "
-            "SELECT accountid, balance, seqnum, numsubentries, "
-            "inflationdest, homedomain, thresholds, flags, lastmodified, "
-            "extension, signers, ledgerext"
-            " FROM accounts "
-            "WHERE accountid IN (SELECT * FROM r)";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        auto& st = prep.statement();
-        st.exchange(soci::use(strAccountIDs));
-        return executeAndFetch(st);
-    }
-#endif
-};
-
-UnorderedMap<LedgerKey, std::shared_ptr<LedgerEntry const>>
-LedgerTxnRoot::Impl::bulkLoadAccounts(UnorderedSet<LedgerKey> const& keys) const
-{
-    ZoneScoped;
-    ZoneValue(static_cast<int64_t>(keys.size()));
-    if (!keys.empty())
-    {
-        BulkLoadAccountsOperation op(mApp.getDatabase(), keys);
-        return populateLoadedEntries(
-            keys, mApp.getDatabase().doDatabaseTypeSpecificOperation(op));
-    }
-    else
-    {
-        return {};
-    }
-}
-}
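Most of the deleted file consists of bulk operations built on soci's vector binding: each column is accumulated into a `std::vector`, bound once, and executed as a single multi-row statement. A stripped-down sketch of that idiom against a bare `soci::session`; the two-column table is illustrative, not stellar-core's schema:

```cpp
#include <soci/soci.h>

#include <cstdint>
#include <string>
#include <vector>

// Sketch of the soci vector-binding idiom the deleted bulk operations
// used: one prepared statement, all rows bound as parallel vectors, one
// round trip per batch.
void
bulkUpsertSketch(soci::session& sess, std::vector<std::string> const& ids,
                 std::vector<int64_t> const& balances)
{
    soci::statement st =
        (sess.prepare << "INSERT INTO accounts (accountid, balance) "
                         "VALUES (:id, :v1) "
                         "ON CONFLICT (accountid) DO UPDATE SET "
                         "balance = excluded.balance",
         soci::use(ids), soci::use(balances));
    st.execute(true); // executes once for the whole bound batch
}
```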
diff --git a/src/ledger/LedgerTxnClaimableBalanceSQL.cpp b/src/ledger/LedgerTxnClaimableBalanceSQL.cpp
deleted file mode 100644
index e952589209..0000000000
--- a/src/ledger/LedgerTxnClaimableBalanceSQL.cpp
+++ /dev/null
@@ -1,373 +0,0 @@
-// Copyright 2020 Stellar Development Foundation and contributors. Licensed
-// under the Apache License, Version 2.0. See the COPYING file at the root
-// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
-
-#include "ledger/LedgerTxnImpl.h"
-#include "ledger/LedgerTypeUtils.h"
-#include "main/Application.h"
-#include "util/GlobalChecks.h"
-#include "util/types.h"
-
-namespace stellar
-{
-
-std::shared_ptr<LedgerEntry const>
-LedgerTxnRoot::Impl::loadClaimableBalance(LedgerKey const& key) const
-{
-    auto balanceID = toOpaqueBase64(key.claimableBalance().balanceID);
-
-    std::string claimableBalanceEntryStr;
-    LedgerEntry le;
-
-    std::string sql = "SELECT ledgerentry "
-                      "FROM claimablebalance "
-                      "WHERE balanceid= :balanceid";
-    auto prep = mApp.getDatabase().getPreparedStatement(sql);
-    auto& st = prep.statement();
-    st.exchange(soci::into(claimableBalanceEntryStr));
-    st.exchange(soci::use(balanceID));
-    st.define_and_bind();
-    st.execute(true);
-    if (!st.got_data())
-    {
-        return nullptr;
-    }
-
-    fromOpaqueBase64(le, claimableBalanceEntryStr);
-    releaseAssert(le.data.type() == CLAIMABLE_BALANCE);
-
-    return std::make_shared<LedgerEntry const>(std::move(le));
-}
-
-class BulkLoadClaimableBalanceOperation
-    : public DatabaseTypeSpecificOperation<std::vector<LedgerEntry>>
-{
-    Database& mDb;
-    std::vector<std::string> mBalanceIDs;
-
-    std::vector<LedgerEntry>
-    executeAndFetch(soci::statement& st)
-    {
-        std::string balanceIdStr, claimableBalanceEntryStr;
-
-        st.exchange(soci::into(balanceIdStr));
-        st.exchange(soci::into(claimableBalanceEntryStr));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getSelectTimer("claimablebalance");
-            st.execute(true);
-        }
-
-        std::vector<LedgerEntry> res;
-        while (st.got_data())
-        {
-            res.emplace_back();
-            auto& le = res.back();
-
-            fromOpaqueBase64(le, claimableBalanceEntryStr);
-            releaseAssert(le.data.type() == CLAIMABLE_BALANCE);
-
-            st.fetch();
-        }
-        return res;
-    }
-
-  public:
-    BulkLoadClaimableBalanceOperation(Database& db,
-                                      UnorderedSet<LedgerKey> const& keys)
-        : mDb(db)
-    {
-        mBalanceIDs.reserve(keys.size());
-        for (auto const& k : keys)
-        {
-            releaseAssert(k.type() == CLAIMABLE_BALANCE);
-            mBalanceIDs.emplace_back(
-                toOpaqueBase64(k.claimableBalance().balanceID));
-        }
-    }
-
-    std::vector<LedgerEntry>
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        std::vector<char const*> cstrBalanceIDs;
-        cstrBalanceIDs.reserve(mBalanceIDs.size());
-        for (size_t i = 0; i < mBalanceIDs.size(); ++i)
-        {
-            cstrBalanceIDs.emplace_back(mBalanceIDs[i].c_str());
-        }
-
-        std::string sql = "WITH r AS (SELECT value FROM carray(?, ?, 'char*')) "
-                          "SELECT balanceid, ledgerentry "
-                          "FROM claimablebalance "
-                          "WHERE balanceid IN r";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        auto be = prep.statement().get_backend();
-        if (be == nullptr)
-        {
-            throw std::runtime_error("no sql backend");
-        }
-        auto sqliteStatement =
-            dynamic_cast<soci::sqlite3_statement_backend*>(be);
-        auto st = sqliteStatement->stmt_;
-
-        sqlite3_reset(st);
-        sqlite3_bind_pointer(st, 1, cstrBalanceIDs.data(), "carray", 0);
-        sqlite3_bind_int(st, 2, static_cast<int>(cstrBalanceIDs.size()));
-        return executeAndFetch(prep.statement());
-    }
-
-#ifdef USE_POSTGRES
-    std::vector<LedgerEntry>
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        std::string strBalanceIDs;
-        marshalToPGArray(pg->conn_, strBalanceIDs, mBalanceIDs);
-
-        std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) "
-                          "SELECT balanceid, ledgerentry "
-                          "FROM claimablebalance "
-                          "WHERE balanceid IN (SELECT * from r)";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        auto& st = prep.statement();
-        st.exchange(soci::use(strBalanceIDs));
-        return executeAndFetch(st);
-    }
-#endif
-};
-
-UnorderedMap<LedgerKey, std::shared_ptr<LedgerEntry const>>
-LedgerTxnRoot::Impl::bulkLoadClaimableBalance(
-    UnorderedSet<LedgerKey> const& keys) const
-{
-    if (!keys.empty())
-    {
-        BulkLoadClaimableBalanceOperation op(mApp.getDatabase(), keys);
-        return populateLoadedEntries(
-            keys, mApp.getDatabase().doDatabaseTypeSpecificOperation(op));
-    }
-    else
-    {
-        return {};
-    }
-}
-
-class BulkDeleteClaimableBalanceOperation
-    : public DatabaseTypeSpecificOperation<void>
-{
-    Database& mDb;
-    LedgerTxnConsistency mCons;
-    std::vector<std::string> mBalanceIDs;
-
-  public:
-    BulkDeleteClaimableBalanceOperation(
-        Database& db, LedgerTxnConsistency cons,
-        std::vector<EntryIterator> const& entries)
-        : mDb(db), mCons(cons)
-    {
-        mBalanceIDs.reserve(entries.size());
-        for (auto const& e : entries)
-        {
-            releaseAssert(!e.entryExists());
-            releaseAssert(e.key().ledgerKey().type() == CLAIMABLE_BALANCE);
-            mBalanceIDs.emplace_back(toOpaqueBase64(
-                e.key().ledgerKey().claimableBalance().balanceID));
-        }
-    }
-
-    void
-    doSociGenericOperation()
-    {
-        std::string sql = "DELETE FROM claimablebalance WHERE balanceid = :id";
-        auto prep = mDb.getPreparedStatement(sql);
-        auto& st = prep.statement();
-        st.exchange(soci::use(mBalanceIDs));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getDeleteTimer("claimablebalance");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mBalanceIDs.size() &&
-            mCons == LedgerTxnConsistency::EXACT)
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-
-    void
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        doSociGenericOperation();
-    }
-
-#ifdef USE_POSTGRES
-    void
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        std::string strBalanceIDs;
-        marshalToPGArray(pg->conn_, strBalanceIDs, mBalanceIDs);
-
-        std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) "
-                          "DELETE FROM claimablebalance "
-                          "WHERE balanceid IN (SELECT * FROM r)";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        auto& st = prep.statement();
-        st.exchange(soci::use(strBalanceIDs));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getDeleteTimer("claimablebalance");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mBalanceIDs.size() &&
-            mCons == LedgerTxnConsistency::EXACT)
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-#endif
-};
-
-void
-LedgerTxnRoot::Impl::bulkDeleteClaimableBalance(
-    std::vector<EntryIterator> const& entries, LedgerTxnConsistency cons)
-{
-    BulkDeleteClaimableBalanceOperation op(mApp.getDatabase(), cons, entries);
-    mApp.getDatabase().doDatabaseTypeSpecificOperation(op);
-}
-
-class BulkUpsertClaimableBalanceOperation
-    : public DatabaseTypeSpecificOperation<void>
-{
-    Database& mDb;
-    std::vector<std::string> mBalanceIDs;
-    std::vector<std::string> mClaimableBalanceEntrys;
-    std::vector<int32_t> mLastModifieds;
-
-    void
-    accumulateEntry(LedgerEntry const& entry)
-    {
-        releaseAssert(entry.data.type() == CLAIMABLE_BALANCE);
-        mBalanceIDs.emplace_back(
-            toOpaqueBase64(entry.data.claimableBalance().balanceID));
-        mClaimableBalanceEntrys.emplace_back(toOpaqueBase64(entry));
-        mLastModifieds.emplace_back(
-            unsignedToSigned(entry.lastModifiedLedgerSeq));
-    }
-
-  public:
-    BulkUpsertClaimableBalanceOperation(
-        Database& Db, std::vector<EntryIterator> const& entryIter)
-        : mDb(Db)
-    {
-        for (auto const& e : entryIter)
-        {
-            releaseAssert(e.entryExists());
-            accumulateEntry(e.entry().ledgerEntry());
-        }
-    }
-
-    void
-    doSociGenericOperation()
-    {
-        std::string sql = "INSERT INTO claimablebalance "
-                          "(balanceid, ledgerentry, lastmodified) "
-                          "VALUES "
-                          "( :id, :v1, :v2 ) "
-                          "ON CONFLICT (balanceid) DO UPDATE SET "
-                          "balanceid = excluded.balanceid, ledgerentry = "
-                          "excluded.ledgerentry, lastmodified = "
-                          "excluded.lastmodified";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(mBalanceIDs));
-        st.exchange(soci::use(mClaimableBalanceEntrys));
-        st.exchange(soci::use(mLastModifieds));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getUpsertTimer("claimablebalance");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mBalanceIDs.size())
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-
-    void
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        doSociGenericOperation();
-    }
-
-#ifdef USE_POSTGRES
-    void
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        std::string strBalanceIDs, strClaimableBalanceEntry, strLastModifieds;
-
-        PGconn* conn = pg->conn_;
-        marshalToPGArray(conn, strBalanceIDs, mBalanceIDs);
-        marshalToPGArray(conn, strClaimableBalanceEntry,
-                         mClaimableBalanceEntrys);
-        marshalToPGArray(conn, strLastModifieds, mLastModifieds);
-
-        std::string sql = "WITH r AS "
-                          "(SELECT unnest(:ids::TEXT[]), unnest(:v1::TEXT[]), "
-                          "unnest(:v2::INT[]))"
-                          "INSERT INTO claimablebalance "
-                          "(balanceid, ledgerentry, lastmodified) "
-                          "SELECT * FROM r "
-                          "ON CONFLICT (balanceid) DO UPDATE SET "
-                          "balanceid = excluded.balanceid, ledgerentry = "
-                          "excluded.ledgerentry, "
-                          "lastmodified = excluded.lastmodified";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(strBalanceIDs));
-        st.exchange(soci::use(strClaimableBalanceEntry));
-        st.exchange(soci::use(strLastModifieds));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getUpsertTimer("claimablebalance");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mBalanceIDs.size())
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-#endif
-};
-
-void
-LedgerTxnRoot::Impl::bulkUpsertClaimableBalance(
-    std::vector<EntryIterator> const& entries)
-{
-    BulkUpsertClaimableBalanceOperation op(mApp.getDatabase(), entries);
-    mApp.getDatabase().doDatabaseTypeSpecificOperation(op);
-}
-
-void
-LedgerTxnRoot::Impl::dropClaimableBalances(bool rebuild)
-{
-    throwIfChild();
-    mEntryCache.clear();
-    mBestOffers.clear();
-
-    mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS claimablebalance;";
-
-    if (rebuild)
-    {
-        std::string coll = mApp.getDatabase().getSimpleCollationClause();
-        mApp.getDatabase().getSession()
-            << "CREATE TABLE claimablebalance ("
-            << "balanceid             VARCHAR(48) " << coll << " PRIMARY KEY, "
-            << "ledgerentry TEXT NOT NULL, "
-            << "lastmodified          INT NOT NULL);";
-    }
-}
-}
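Unlike the accounts table, the claimablebalance table stored each entry as one opaque column: the whole `LedgerEntry` is XDR-serialized and base64-encoded via `toOpaqueBase64`/`fromOpaqueBase64`. A minimal sketch of that round trip, built only from the `decoder` and xdrpp helpers the deleted files already include; the free-function wrappers here are illustrative:

```cpp
#include "util/Decoder.h"
#include "xdr/Stellar-ledger-entries.h"
#include "xdrpp/marshal.h"

#include <cstdint>
#include <string>
#include <vector>

// Sketch of the opaque-base64 round trip behind toOpaqueBase64 /
// fromOpaqueBase64: XDR-marshal the entry, then base64 it (and back).
std::string
entryToOpaqueB64(stellar::LedgerEntry const& le)
{
    return decoder::encode_b64(xdr::xdr_to_opaque(le));
}

void
entryFromOpaqueB64(stellar::LedgerEntry& le, std::string const& b64)
{
    std::vector<uint8_t> opaque;
    decoder::decode_b64(b64, opaque);
    xdr::xdr_from_opaque(opaque, le);
}
```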
diff --git a/src/ledger/LedgerTxnConfigSettingSQL.cpp b/src/ledger/LedgerTxnConfigSettingSQL.cpp
deleted file mode 100644
index d06282e203..0000000000
--- a/src/ledger/LedgerTxnConfigSettingSQL.cpp
+++ /dev/null
@@ -1,294 +0,0 @@
-// Copyright 2022 Stellar Development Foundation and contributors. Licensed
-// under the Apache License, Version 2.0. See the COPYING file at the root
-// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
-
-#include "ledger/LedgerTxnImpl.h"
-#include "ledger/LedgerTypeUtils.h"
-#include "ledger/NonSociRelatedException.h"
-#include "main/Application.h"
-#include "util/GlobalChecks.h"
-#include "util/types.h"
-
-namespace stellar
-{
-
-static void
-throwIfNotConfigSetting(LedgerEntryType type)
-{
-    if (type != CONFIG_SETTING)
-    {
-        throw NonSociRelatedException("LedgerEntry is not a CONFIG_SETTING");
-    }
-}
-
-std::shared_ptr<LedgerEntry const>
-LedgerTxnRoot::Impl::loadConfigSetting(LedgerKey const& key) const
-{
-    int32_t configSettingID = key.configSetting().configSettingID;
-    std::string configSettingEntryStr;
-
-    std::string sql = "SELECT ledgerentry "
-                      "FROM configsettings "
-                      "WHERE configsettingid = :configsettingid";
-    auto prep = mApp.getDatabase().getPreparedStatement(sql);
-    auto& st = prep.statement();
-    st.exchange(soci::into(configSettingEntryStr));
-    st.exchange(soci::use(configSettingID));
-    st.define_and_bind();
-    {
-        auto timer = mApp.getDatabase().getSelectTimer("configsetting");
-        st.execute(true);
-    }
-    if (!st.got_data())
-    {
-        return nullptr;
-    }
-
-    LedgerEntry le;
-    fromOpaqueBase64(le, configSettingEntryStr);
-    throwIfNotConfigSetting(le.data.type());
-
-    return std::make_shared<LedgerEntry const>(std::move(le));
-}
-
-class bulkLoadConfigSettingsOperation
-    : public DatabaseTypeSpecificOperation<std::vector<LedgerEntry>>
-{
-    Database& mDb;
-    std::vector<int32_t> mConfigSettingIDs;
-
-    std::vector<LedgerEntry>
-    executeAndFetch(soci::statement& st)
-    {
-        std::string configSettingEntryStr;
-
-        st.exchange(soci::into(configSettingEntryStr));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getSelectTimer("configsetting");
-            st.execute(true);
-        }
-
-        std::vector<LedgerEntry> res;
-        while (st.got_data())
-        {
-            res.emplace_back();
-            auto& le = res.back();
-
-            fromOpaqueBase64(le, configSettingEntryStr);
-            throwIfNotConfigSetting(le.data.type());
-
-            st.fetch();
-        }
-        return res;
-    }
-
-  public:
-    bulkLoadConfigSettingsOperation(Database& db,
-                                    UnorderedSet<LedgerKey> const& keys)
-        : mDb(db)
-    {
-        mConfigSettingIDs.reserve(keys.size());
-        for (auto const& k : keys)
-        {
-            throwIfNotConfigSetting(k.type());
-            mConfigSettingIDs.emplace_back(k.configSetting().configSettingID);
-        }
-    }
-
-    std::vector<LedgerEntry>
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        std::string sql = "WITH r AS (SELECT value FROM carray(?, ?, 'int32')) "
-                          "SELECT ledgerentry "
-                          "FROM configsettings "
-                          "WHERE configsettingid IN r";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        auto be = prep.statement().get_backend();
-        if (be == nullptr)
-        {
-            throw std::runtime_error("no sql backend");
-        }
-        auto sqliteStatement =
-            dynamic_cast<soci::sqlite3_statement_backend*>(be);
-        auto st = sqliteStatement->stmt_;
-
-        sqlite3_reset(st);
-        sqlite3_bind_pointer(st, 1, (void*)mConfigSettingIDs.data(), "carray",
-                             0);
-        sqlite3_bind_int(st, 2, static_cast<int>(mConfigSettingIDs.size()));
-        return executeAndFetch(prep.statement());
-    }
-
-#ifdef USE_POSTGRES
-    std::vector<LedgerEntry>
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        std::string strConfigSettingIDs;
-        marshalToPGArray(pg->conn_, strConfigSettingIDs, mConfigSettingIDs);
-
-        std::string sql = "WITH r AS (SELECT unnest(:v1::INT[])) "
-                          "SELECT ledgerentry "
-                          "FROM configsettings "
-                          "WHERE configsettingid IN (SELECT * from r)";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        auto& st = prep.statement();
-        st.exchange(soci::use(strConfigSettingIDs));
-        return executeAndFetch(st);
-    }
-#endif
-};
-
-UnorderedMap<LedgerKey, std::shared_ptr<LedgerEntry const>>
-LedgerTxnRoot::Impl::bulkLoadConfigSettings(
-    UnorderedSet<LedgerKey> const& keys) const
-{
-    if (!keys.empty())
-    {
-        bulkLoadConfigSettingsOperation op(mApp.getDatabase(), keys);
-        return populateLoadedEntries(
-            keys, mApp.getDatabase().doDatabaseTypeSpecificOperation(op));
-    }
-    else
-    {
-        return {};
-    }
-}
-
-class bulkUpsertConfigSettingsOperation
-    : public DatabaseTypeSpecificOperation<void>
-{
-    Database& mDb;
-    std::vector<int32_t> mConfigSettingIDs;
-    std::vector<std::string> mConfigSettingEntries;
-    std::vector<int32_t> mLastModifieds;
-
-    void
-    accumulateEntry(LedgerEntry const& entry)
-    {
-        throwIfNotConfigSetting(entry.data.type());
-
-        mConfigSettingIDs.emplace_back(
-            entry.data.configSetting().configSettingID());
-        mConfigSettingEntries.emplace_back(toOpaqueBase64(entry));
-        mLastModifieds.emplace_back(
-            unsignedToSigned(entry.lastModifiedLedgerSeq));
-    }
-
-  public:
-    bulkUpsertConfigSettingsOperation(
-        Database& Db, std::vector<EntryIterator> const& entryIter)
-        : mDb(Db)
-    {
-        for (auto const& e : entryIter)
-        {
-            releaseAssert(e.entryExists());
-            accumulateEntry(e.entry().ledgerEntry());
-        }
-    }
-
-    void
-    doSociGenericOperation()
-    {
-        std::string sql = "INSERT INTO configsettings "
-                          "(configsettingid, ledgerentry, lastmodified) "
-                          "VALUES "
-                          "( :id, :v1, :v2 ) "
-                          "ON CONFLICT (configsettingid) DO UPDATE SET "
-                          "ledgerentry = excluded.ledgerentry, "
-                          "lastmodified = excluded.lastmodified";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(mConfigSettingIDs));
-        st.exchange(soci::use(mConfigSettingEntries));
-        st.exchange(soci::use(mLastModifieds));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getUpsertTimer("configsetting");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) !=
-            mConfigSettingIDs.size())
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-
-    void
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        doSociGenericOperation();
-    }
-
-#ifdef USE_POSTGRES
-    void
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        std::string strConfigSettingIDs, strConfigSettingEntries,
-            strLastModifieds;
-
-        PGconn* conn = pg->conn_;
-        marshalToPGArray(conn, strConfigSettingIDs, mConfigSettingIDs);
-        marshalToPGArray(conn, strConfigSettingEntries, mConfigSettingEntries);
-        marshalToPGArray(conn, strLastModifieds, mLastModifieds);
-
-        std::string sql = "WITH r AS "
-                          "(SELECT unnest(:ids::INT[]), unnest(:v1::TEXT[]), "
-                          "unnest(:v2::INT[])) "
-                          "INSERT INTO configsettings "
-                          "(configsettingid, ledgerentry, lastmodified) "
-                          "SELECT * FROM r "
-                          "ON CONFLICT (configsettingid) DO UPDATE SET "
-                          "ledgerentry = excluded.ledgerentry, "
-                          "lastmodified = excluded.lastmodified";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(strConfigSettingIDs));
-        st.exchange(soci::use(strConfigSettingEntries));
-        st.exchange(soci::use(strLastModifieds));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getUpsertTimer("configsetting");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) !=
-            mConfigSettingIDs.size())
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-#endif
-};
-
-void
-LedgerTxnRoot::Impl::bulkUpsertConfigSettings(
-    std::vector<EntryIterator> const& entries)
-{
-    bulkUpsertConfigSettingsOperation op(mApp.getDatabase(), entries);
-    mApp.getDatabase().doDatabaseTypeSpecificOperation(op);
-}
-
-void
-LedgerTxnRoot::Impl::dropConfigSettings(bool rebuild)
-{
-    throwIfChild();
-    mEntryCache.clear();
-    mBestOffers.clear();
-
-    mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS configsettings;";
-
-    if (rebuild)
-    {
-        std::string coll = mApp.getDatabase().getSimpleCollationClause();
-        mApp.getDatabase().getSession()
-            << "CREATE TABLE configsettings ("
-            << "configsettingid INT PRIMARY KEY, "
-            << "ledgerentry  TEXT " << coll << " NOT NULL, "
-            << "lastmodified INT NOT NULL);";
-    }
-}
-}
\ No newline at end of file
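The sqlite-specific paths above all share one trick: they reach beneath soci to the raw `sqlite3_stmt` and bind the key batch through sqlite's `carray` table-valued function, so `WHERE ... IN carray(?, ?, 'char*')` expands server-side. The core of that idiom, extracted as a sketch; stellar-core registers the carray extension elsewhere, and the helper name is illustrative:

```cpp
#include <soci/soci.h>
#include <soci/sqlite3/soci-sqlite3.h>

#include <stdexcept>
#include <vector>

// Sketch of the carray binding idiom from the deleted sqlite paths:
// bind a pointer to an array of C strings plus its length; carray then
// treats the pair as a one-column table the IN clause can scan.
void
bindCArray(soci::statement& stmt, std::vector<char const*>& cstrs)
{
    auto be = stmt.get_backend();
    auto sqliteStatement = dynamic_cast<soci::sqlite3_statement_backend*>(be);
    if (sqliteStatement == nullptr)
    {
        throw std::runtime_error("no sqlite backend");
    }
    sqlite3_stmt* st = sqliteStatement->stmt_;
    sqlite3_reset(st);
    sqlite3_bind_pointer(st, 1, cstrs.data(), "carray", 0);
    sqlite3_bind_int(st, 2, static_cast<int>(cstrs.size()));
}
```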
diff --git a/src/ledger/LedgerTxnContractCodeSQL.cpp b/src/ledger/LedgerTxnContractCodeSQL.cpp
deleted file mode 100644
index 0421e8996c..0000000000
--- a/src/ledger/LedgerTxnContractCodeSQL.cpp
+++ /dev/null
@@ -1,386 +0,0 @@
-// Copyright 2022 Stellar Development Foundation and contributors. Licensed
-// under the Apache License, Version 2.0. See the COPYING file at the root
-// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
-
-#include "ledger/LedgerTxnImpl.h"
-#include "ledger/LedgerTypeUtils.h"
-#include "ledger/NonSociRelatedException.h"
-#include "main/Application.h"
-#include "util/GlobalChecks.h"
-#include "util/types.h"
-
-namespace stellar
-{
-
-static void
-throwIfNotContractCode(LedgerEntryType type)
-{
-    if (type != CONTRACT_CODE)
-    {
-        throw NonSociRelatedException("LedgerEntry is not a CONTRACT_CODE");
-    }
-}
-
-std::shared_ptr<LedgerEntry const>
-LedgerTxnRoot::Impl::loadContractCode(LedgerKey const& k) const
-{
-    auto hash = toOpaqueBase64(k.contractCode().hash);
-    std::string contractCodeEntryStr;
-
-    std::string sql = "SELECT ledgerentry "
-                      "FROM contractcode "
-                      "WHERE hash = :hash";
-    auto prep = mApp.getDatabase().getPreparedStatement(sql);
-    auto& st = prep.statement();
-    st.exchange(soci::into(contractCodeEntryStr));
-    st.exchange(soci::use(hash));
-    st.define_and_bind();
-    {
-        auto timer = mApp.getDatabase().getSelectTimer("contractcode");
-        st.execute(true);
-    }
-    if (!st.got_data())
-    {
-        return nullptr;
-    }
-
-    LedgerEntry le;
-    fromOpaqueBase64(le, contractCodeEntryStr);
-    throwIfNotContractCode(le.data.type());
-
-    return std::make_shared<LedgerEntry const>(std::move(le));
-}
-
-class BulkLoadContractCodeOperation
-    : public DatabaseTypeSpecificOperation<std::vector<LedgerEntry>>
-{
-    Database& mDb;
-    std::vector<std::string> mHashes;
-
-    std::vector<LedgerEntry>
-    executeAndFetch(soci::statement& st)
-    {
-        std::string contractCodeEntryStr;
-
-        st.exchange(soci::into(contractCodeEntryStr));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getSelectTimer("contractcode");
-            st.execute(true);
-        }
-
-        std::vector<LedgerEntry> res;
-        while (st.got_data())
-        {
-            res.emplace_back();
-            auto& le = res.back();
-
-            fromOpaqueBase64(le, contractCodeEntryStr);
-            throwIfNotContractCode(le.data.type());
-
-            st.fetch();
-        }
-        return res;
-    }
-
-  public:
-    BulkLoadContractCodeOperation(Database& db,
-                                  UnorderedSet<LedgerKey> const& keys)
-        : mDb(db)
-    {
-        mHashes.reserve(keys.size());
-        for (auto const& k : keys)
-        {
-            throwIfNotContractCode(k.type());
-            mHashes.emplace_back(toOpaqueBase64(k.contractCode().hash));
-        }
-    }
-
-    std::vector<LedgerEntry>
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        std::vector<char const*> cStrHashes;
-        cStrHashes.reserve(mHashes.size());
-        for (auto const& h : mHashes)
-        {
-            cStrHashes.emplace_back(h.c_str());
-        }
-        std::string sql = "SELECT ledgerentry "
-                          "FROM contractcode "
-                          "WHERE hash IN carray(?, ?, 'char*')";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        auto be = prep.statement().get_backend();
-        if (be == nullptr)
-        {
-            throw std::runtime_error("no sql backend");
-        }
-        auto sqliteStatement =
-            dynamic_cast<soci::sqlite3_statement_backend*>(be);
-        auto st = sqliteStatement->stmt_;
-
-        sqlite3_reset(st);
-        sqlite3_bind_pointer(st, 1, (void*)cStrHashes.data(), "carray", 0);
-        sqlite3_bind_int(st, 2, static_cast<int>(cStrHashes.size()));
-        return executeAndFetch(prep.statement());
-    }
-
-#ifdef USE_POSTGRES
-    std::vector<LedgerEntry>
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        std::string strHashes;
-        marshalToPGArray(pg->conn_, strHashes, mHashes);
-
-        std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) "
-                          "SELECT ledgerentry "
-                          "FROM contractcode "
-                          "WHERE (hash) IN (SELECT * from r)";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        auto& st = prep.statement();
-        st.exchange(soci::use(strHashes));
-        return executeAndFetch(st);
-    }
-#endif
-};
-
-UnorderedMap<LedgerKey, std::shared_ptr<LedgerEntry const>>
-LedgerTxnRoot::Impl::bulkLoadContractCode(
-    UnorderedSet<LedgerKey> const& keys) const
-{
-    if (!keys.empty())
-    {
-        BulkLoadContractCodeOperation op(mApp.getDatabase(), keys);
-        return populateLoadedEntries(
-            keys, mApp.getDatabase().doDatabaseTypeSpecificOperation(op));
-    }
-    else
-    {
-        return {};
-    }
-}
-
-class BulkDeleteContractCodeOperation
-    : public DatabaseTypeSpecificOperation<void>
-{
-    Database& mDb;
-    LedgerTxnConsistency mCons;
-    std::vector<std::string> mHashes;
-
-  public:
-    BulkDeleteContractCodeOperation(Database& db, LedgerTxnConsistency cons,
-                                    std::vector<EntryIterator> const& entries)
-        : mDb(db), mCons(cons)
-    {
-        mHashes.reserve(entries.size());
-        for (auto const& e : entries)
-        {
-            releaseAssert(!e.entryExists());
-            throwIfNotContractCode(e.key().ledgerKey().type());
-            mHashes.emplace_back(
-                toOpaqueBase64(e.key().ledgerKey().contractCode().hash));
-        }
-    }
-
-    void
-    doSociGenericOperation()
-    {
-        std::string sql = "DELETE FROM contractcode WHERE hash = :id";
-        auto prep = mDb.getPreparedStatement(sql);
-        auto& st = prep.statement();
-        st.exchange(soci::use(mHashes));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getDeleteTimer("contractcode");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mHashes.size() &&
-            mCons == LedgerTxnConsistency::EXACT)
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-
-    void
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        doSociGenericOperation();
-    }
-
-#ifdef USE_POSTGRES
-    void
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        std::string strHashes;
-        marshalToPGArray(pg->conn_, strHashes, mHashes);
-
-        std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) "
-                          "DELETE FROM contractcode "
-                          "WHERE hash IN (SELECT * FROM r)";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        auto& st = prep.statement();
-        st.exchange(soci::use(strHashes));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getDeleteTimer("contractcode");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mHashes.size() &&
-            mCons == LedgerTxnConsistency::EXACT)
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-#endif
-};
-
-void
-LedgerTxnRoot::Impl::bulkDeleteContractCode(
-    std::vector<EntryIterator> const& entries, LedgerTxnConsistency cons)
-{
-    BulkDeleteContractCodeOperation op(mApp.getDatabase(), cons, entries);
-    mApp.getDatabase().doDatabaseTypeSpecificOperation(op);
-}
-
-class BulkUpsertContractCodeOperation
-    : public DatabaseTypeSpecificOperation<void>
-{
-    Database& mDb;
-    std::vector<std::string> mHashes;
-    std::vector<std::string> mContractCodeEntries;
-    std::vector<int32_t> mLastModifieds;
-
-    void
-    accumulateEntry(LedgerEntry const& entry)
-    {
-        throwIfNotContractCode(entry.data.type());
-
-        mHashes.emplace_back(toOpaqueBase64(entry.data.contractCode().hash));
-        mContractCodeEntries.emplace_back(toOpaqueBase64(entry));
-        mLastModifieds.emplace_back(
-            unsignedToSigned(entry.lastModifiedLedgerSeq));
-    }
-
-  public:
-    BulkUpsertContractCodeOperation(Database& Db,
-                                    std::vector<EntryIterator> const& entryIter)
-        : mDb(Db)
-    {
-        for (auto const& e : entryIter)
-        {
-            releaseAssert(e.entryExists());
-            accumulateEntry(e.entry().ledgerEntry());
-        }
-    }
-
-    void
-    doSociGenericOperation()
-    {
-        std::string sql = "INSERT INTO contractCode "
-                          "(hash, ledgerentry, lastmodified) "
-                          "VALUES "
-                          "( :hash, :v1, :v2 ) "
-                          "ON CONFLICT (hash) DO UPDATE SET "
-                          "ledgerentry = excluded.ledgerentry, "
-                          "lastmodified = excluded.lastmodified";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(mHashes));
-        st.exchange(soci::use(mContractCodeEntries));
-        st.exchange(soci::use(mLastModifieds));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getUpsertTimer("contractcode");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mHashes.size())
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-
-    void
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        doSociGenericOperation();
-    }
-
-#ifdef USE_POSTGRES
-    void
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        std::string strHashes, strContractCodeEntries, strLastModifieds;
-
-        PGconn* conn = pg->conn_;
-        marshalToPGArray(conn, strHashes, mHashes);
-        marshalToPGArray(conn, strContractCodeEntries, mContractCodeEntries);
-        marshalToPGArray(conn, strLastModifieds, mLastModifieds);
-
-        std::string sql = "WITH r AS "
-                          "(SELECT unnest(:v1::TEXT[]), "
-                          "unnest(:v1::TEXT[]), unnest(:v2::INT[])) "
-                          "INSERT INTO contractcode "
-                          "(hash, ledgerentry, lastmodified) "
-                          "SELECT * FROM r "
-                          "ON CONFLICT (hash) DO UPDATE SET "
-                          "ledgerentry = excluded.ledgerentry, "
-                          "lastmodified = excluded.lastmodified";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(strHashes));
-        st.exchange(soci::use(strContractCodeEntries));
-        st.exchange(soci::use(strLastModifieds));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getUpsertTimer("contractcode");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mHashes.size())
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-#endif
-};
-
-void
-LedgerTxnRoot::Impl::bulkUpsertContractCode(
-    std::vector<EntryIterator> const& entries)
-{
-    BulkUpsertContractCodeOperation op(mApp.getDatabase(), entries);
-    mApp.getDatabase().doDatabaseTypeSpecificOperation(op);
-}
-
-void
-LedgerTxnRoot::Impl::dropContractCode(bool rebuild)
-{
-    throwIfChild();
-    mEntryCache.clear();
-    mBestOffers.clear();
-
-    std::string coll = mApp.getDatabase().getSimpleCollationClause();
-
-    mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS contractcode;";
-
-    if (rebuild)
-    {
-        mApp.getDatabase().getSession()
-            << "CREATE TABLE contractcode ("
-            << "hash   TEXT " << coll << " NOT NULL, "
-            << "ledgerentry  TEXT " << coll << " NOT NULL, "
-            << "lastmodified INT NOT NULL, "
-            << "PRIMARY KEY (hash));";
-        if (!mApp.getDatabase().isSqlite())
-        {
-            mApp.getDatabase().getSession() << "ALTER TABLE contractcode "
-                                            << "ALTER COLUMN hash "
-                                            << "TYPE TEXT COLLATE \"C\";";
-        }
-    }
-}
-
-}
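
The bulk loaders deleted above all lean on the same SQLite trick: bind a C array of strings through the `carray()` table-valued function so an arbitrary-length `IN` list goes through a single prepared statement. For readers unfamiliar with the pattern, here is a minimal standalone sketch of it; it assumes an SQLite build with the carray extension enabled and uses an illustrative `contractcode(hash, ledgerentry)` table, so names are placeholders rather than stellar-core API.

```cpp
// Sketch only: bulk-select rows whose key is in a C++ vector, via carray().
// Assumes SQLite is compiled with (or has loaded) the carray extension.
#include <sqlite3.h>
#include <stdexcept>
#include <string>
#include <vector>

std::vector<std::string>
bulkLoadLedgerEntries(sqlite3* db, std::vector<std::string> const& hashes)
{
    // carray() reads directly from this pointer array, which must stay
    // alive until the statement finishes executing.
    std::vector<char const*> cstrs;
    cstrs.reserve(hashes.size());
    for (auto const& h : hashes)
    {
        cstrs.push_back(h.c_str());
    }

    char const* sql = "SELECT ledgerentry FROM contractcode "
                      "WHERE hash IN carray(?1, ?2, 'char*')";
    sqlite3_stmt* st = nullptr;
    if (sqlite3_prepare_v2(db, sql, -1, &st, nullptr) != SQLITE_OK)
    {
        throw std::runtime_error(sqlite3_errmsg(db));
    }

    // Bind the array with the "carray" pointer tag, then its length,
    // mirroring the sqlite3_bind_pointer/sqlite3_bind_int pairs above.
    sqlite3_bind_pointer(st, 1, (void*)cstrs.data(), "carray", nullptr);
    sqlite3_bind_int(st, 2, static_cast<int>(cstrs.size()));

    std::vector<std::string> res;
    while (sqlite3_step(st) == SQLITE_ROW)
    {
        res.emplace_back(
            reinterpret_cast<char const*>(sqlite3_column_text(st, 0)));
    }
    sqlite3_finalize(st);
    return res;
}
```
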
diff --git a/src/ledger/LedgerTxnContractDataSQL.cpp b/src/ledger/LedgerTxnContractDataSQL.cpp
deleted file mode 100644
index a7f716a561..0000000000
--- a/src/ledger/LedgerTxnContractDataSQL.cpp
+++ /dev/null
@@ -1,461 +0,0 @@
-// Copyright 2022 Stellar Development Foundation and contributors. Licensed
-// under the Apache License, Version 2.0. See the COPYING file at the root
-// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
-
-#include "ledger/LedgerTxnImpl.h"
-#include "ledger/LedgerTypeUtils.h"
-#include "ledger/NonSociRelatedException.h"
-#include "main/Application.h"
-#include "util/GlobalChecks.h"
-#include "util/types.h"
-
-namespace stellar
-{
-
-static void
-throwIfNotContractData(LedgerEntryType type)
-{
-    if (type != CONTRACT_DATA)
-    {
-        throw NonSociRelatedException("LedgerEntry is not a CONTRACT_DATA");
-    }
-}
-
-std::shared_ptr<LedgerEntry const>
-LedgerTxnRoot::Impl::loadContractData(LedgerKey const& k) const
-{
-    auto contractID = toOpaqueBase64(k.contractData().contract);
-    auto key = toOpaqueBase64(k.contractData().key);
-    int32_t type = k.contractData().durability;
-    std::string contractDataEntryStr;
-
-    std::string sql =
-        "SELECT ledgerentry "
-        "FROM contractdata "
-        "WHERE contractID = :contractID AND key = :key AND type = :type";
-    auto prep = mApp.getDatabase().getPreparedStatement(sql);
-    auto& st = prep.statement();
-    st.exchange(soci::into(contractDataEntryStr));
-    st.exchange(soci::use(contractID));
-    st.exchange(soci::use(key));
-    st.exchange(soci::use(type));
-    st.define_and_bind();
-    {
-        auto timer = mApp.getDatabase().getSelectTimer("contractdata");
-        st.execute(true);
-    }
-    if (!st.got_data())
-    {
-        return nullptr;
-    }
-
-    LedgerEntry le;
-    fromOpaqueBase64(le, contractDataEntryStr);
-    throwIfNotContractData(le.data.type());
-
-    return std::make_shared<LedgerEntry const>(std::move(le));
-}
-
-class BulkLoadContractDataOperation
-    : public DatabaseTypeSpecificOperation<std::vector<LedgerEntry>>
-{
-    Database& mDb;
-    std::vector<std::string> mContractIDs;
-    std::vector<std::string> mKeys;
-    std::vector<int32_t> mTypes;
-
-    std::vector<LedgerEntry>
-    executeAndFetch(soci::statement& st)
-    {
-        std::string contractDataEntryStr;
-
-        st.exchange(soci::into(contractDataEntryStr));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getSelectTimer("contractdata");
-            st.execute(true);
-        }
-
-        std::vector<LedgerEntry> res;
-        while (st.got_data())
-        {
-            res.emplace_back();
-            auto& le = res.back();
-
-            fromOpaqueBase64(le, contractDataEntryStr);
-            throwIfNotContractData(le.data.type());
-
-            st.fetch();
-        }
-        return res;
-    }
-
-  public:
-    BulkLoadContractDataOperation(Database& db,
-                                  UnorderedSet<LedgerKey> const& keys)
-        : mDb(db)
-    {
-        mContractIDs.reserve(keys.size());
-        mKeys.reserve(keys.size());
-        mTypes.reserve(keys.size());
-        for (auto const& k : keys)
-        {
-            throwIfNotContractData(k.type());
-            mContractIDs.emplace_back(
-                toOpaqueBase64(k.contractData().contract));
-            mKeys.emplace_back(toOpaqueBase64(k.contractData().key));
-            mTypes.emplace_back(k.contractData().durability);
-        }
-    }
-
-    std::vector<LedgerEntry>
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        std::vector<char const*> cStrContractIDs, cStrKeys;
-        cStrContractIDs.reserve(mContractIDs.size());
-        cStrKeys.reserve(mKeys.size());
-        for (auto const& cid : mContractIDs)
-        {
-            cStrContractIDs.emplace_back(cid.c_str());
-        }
-        for (auto const& key : mKeys)
-        {
-            cStrKeys.emplace_back(key.c_str());
-        }
-
-        std::string sqlJoin = "SELECT x.value, y.value, z.value "
-                              "FROM "
-                              "(SELECT rowid, value FROM carray(?, ?, 'char*') "
-                              "ORDER BY rowid) AS x "
-                              "INNER JOIN "
-                              "(SELECT rowid, value FROM carray(?, ?, 'char*') "
-                              "ORDER BY rowid) AS y "
-                              "ON x.rowid = y.rowid "
-                              "INNER JOIN "
-                              "(SELECT rowid, value FROM carray(?, ?, 'int32') "
-                              "ORDER BY rowid) AS z "
-                              "ON x.rowid = z.rowid";
-
-        std::string sql = "WITH r AS  (" + sqlJoin +
-                          ") "
-                          "SELECT ledgerentry "
-                          "FROM contractdata "
-                          "WHERE (contractid, key, type) IN (SELECT * FROM r)";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        auto be = prep.statement().get_backend();
-        if (be == nullptr)
-        {
-            throw std::runtime_error("no sql backend");
-        }
-        auto sqliteStatement =
-            dynamic_cast<soci::sqlite3_statement_backend*>(be);
-        auto st = sqliteStatement->stmt_;
-
-        sqlite3_reset(st);
-        sqlite3_bind_pointer(st, 1, (void*)cStrContractIDs.data(), "carray", 0);
-        sqlite3_bind_int(st, 2, static_cast<int>(mContractIDs.size()));
-        sqlite3_bind_pointer(st, 3, (void*)cStrKeys.data(), "carray", 0);
-        sqlite3_bind_int(st, 4, static_cast<int>(mKeys.size()));
-        sqlite3_bind_pointer(st, 5, (void*)mTypes.data(), "carray", 0);
-        sqlite3_bind_int(st, 6, static_cast<int>(mTypes.size()));
-        return executeAndFetch(prep.statement());
-    }
-
-#ifdef USE_POSTGRES
-    std::vector<LedgerEntry>
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        std::string strContractIDs, strKeys, strTypes;
-        marshalToPGArray(pg->conn_, strContractIDs, mContractIDs);
-        marshalToPGArray(pg->conn_, strKeys, mKeys);
-        marshalToPGArray(pg->conn_, strTypes, mTypes);
-
-        std::string sql = "WITH r AS (SELECT unnest(:ids::TEXT[]), "
-                          "unnest(:v1::TEXT[]), unnest(:v2::INT[])) "
-                          "SELECT ledgerentry "
-                          "FROM contractdata "
-                          "WHERE (contractid, key, type) IN (SELECT * from r)";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        auto& st = prep.statement();
-        st.exchange(soci::use(strContractIDs));
-        st.exchange(soci::use(strKeys));
-        st.exchange(soci::use(strTypes));
-        return executeAndFetch(st);
-    }
-#endif
-};
-
-UnorderedMap<LedgerKey, std::shared_ptr<LedgerEntry const>>
-LedgerTxnRoot::Impl::bulkLoadContractData(
-    UnorderedSet<LedgerKey> const& keys) const
-{
-    if (!keys.empty())
-    {
-        BulkLoadContractDataOperation op(mApp.getDatabase(), keys);
-        return populateLoadedEntries(
-            keys, mApp.getDatabase().doDatabaseTypeSpecificOperation(op));
-    }
-    else
-    {
-        return {};
-    }
-}
-
-class BulkDeleteContractDataOperation
-    : public DatabaseTypeSpecificOperation<void>
-{
-    Database& mDb;
-    LedgerTxnConsistency mCons;
-    std::vector<std::string> mContractIDs;
-    std::vector<std::string> mKeys;
-    std::vector<int32_t> mTypes;
-
-  public:
-    BulkDeleteContractDataOperation(Database& db, LedgerTxnConsistency cons,
-                                    std::vector<EntryIterator> const& entries)
-        : mDb(db), mCons(cons)
-    {
-        mContractIDs.reserve(entries.size());
-        for (auto const& e : entries)
-        {
-            releaseAssert(!e.entryExists());
-            throwIfNotContractData(e.key().ledgerKey().type());
-            mContractIDs.emplace_back(
-                toOpaqueBase64(e.key().ledgerKey().contractData().contract));
-            mKeys.emplace_back(
-                toOpaqueBase64(e.key().ledgerKey().contractData().key));
-            mTypes.emplace_back(e.key().ledgerKey().contractData().durability);
-        }
-    }
-
-    void
-    doSociGenericOperation()
-    {
-        std::string sql = "DELETE FROM contractdata WHERE contractid = :id "
-                          "AND key = :key AND type = :type";
-        auto prep = mDb.getPreparedStatement(sql);
-        auto& st = prep.statement();
-        st.exchange(soci::use(mContractIDs));
-        st.exchange(soci::use(mKeys));
-        st.exchange(soci::use(mTypes));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getDeleteTimer("contractdata");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) !=
-                mContractIDs.size() &&
-            mCons == LedgerTxnConsistency::EXACT)
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-
-    void
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        doSociGenericOperation();
-    }
-
-#ifdef USE_POSTGRES
-    void
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        std::string strContractIDs, strKeys, strTypes;
-        marshalToPGArray(pg->conn_, strContractIDs, mContractIDs);
-        marshalToPGArray(pg->conn_, strKeys, mKeys);
-        marshalToPGArray(pg->conn_, strTypes, mTypes);
-
-        std::string sql = "WITH r AS (SELECT unnest(:ids::TEXT[]), "
-                          "unnest(:v1::TEXT[]), unnest(:v2::INT[])) "
-                          "DELETE FROM contractdata "
-                          "WHERE (contractid, key, type) IN (SELECT * FROM r)";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        auto& st = prep.statement();
-        st.exchange(soci::use(strContractIDs));
-        st.exchange(soci::use(strKeys));
-        st.exchange(soci::use(strTypes));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getDeleteTimer("contractdata");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) !=
-                mContractIDs.size() &&
-            mCons == LedgerTxnConsistency::EXACT)
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-#endif
-};
-
-void
-LedgerTxnRoot::Impl::bulkDeleteContractData(
-    std::vector<EntryIterator> const& entries, LedgerTxnConsistency cons)
-{
-    BulkDeleteContractDataOperation op(mApp.getDatabase(), cons, entries);
-    mApp.getDatabase().doDatabaseTypeSpecificOperation(op);
-}
-
-class BulkUpsertContractDataOperation
-    : public DatabaseTypeSpecificOperation<void>
-{
-    Database& mDb;
-    std::vector<std::string> mContractIDs;
-    std::vector<std::string> mKeys;
-    std::vector<int32_t> mTypes;
-    std::vector<std::string> mContractDataEntries;
-    std::vector<int32_t> mLastModifieds;
-
-    void
-    accumulateEntry(LedgerEntry const& entry)
-    {
-        throwIfNotContractData(entry.data.type());
-
-        mContractIDs.emplace_back(
-            toOpaqueBase64(entry.data.contractData().contract));
-        mKeys.emplace_back(toOpaqueBase64(entry.data.contractData().key));
-        mTypes.emplace_back(entry.data.contractData().durability);
-        mContractDataEntries.emplace_back(toOpaqueBase64(entry));
-        mLastModifieds.emplace_back(
-            unsignedToSigned(entry.lastModifiedLedgerSeq));
-    }
-
-  public:
-    BulkUpsertContractDataOperation(Database& Db,
-                                    std::vector<EntryIterator> const& entryIter)
-        : mDb(Db)
-    {
-        for (auto const& e : entryIter)
-        {
-            releaseAssert(e.entryExists());
-            accumulateEntry(e.entry().ledgerEntry());
-        }
-    }
-
-    void
-    doSociGenericOperation()
-    {
-        std::string sql = "INSERT INTO contractData "
-                          "(contractid, key, type, ledgerentry, lastmodified) "
-                          "VALUES "
-                          "( :id, :key, :type, :v1, :v2 ) "
-                          "ON CONFLICT (contractid, key, type) DO UPDATE SET "
-                          "ledgerentry = excluded.ledgerentry, "
-                          "lastmodified = excluded.lastmodified";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(mContractIDs));
-        st.exchange(soci::use(mKeys));
-        st.exchange(soci::use(mTypes));
-        st.exchange(soci::use(mContractDataEntries));
-        st.exchange(soci::use(mLastModifieds));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getUpsertTimer("contractdata");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mContractIDs.size())
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-
-    void
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        doSociGenericOperation();
-    }
-
-#ifdef USE_POSTGRES
-    void
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        std::string strContractIDs, strKeys, strTypes, strContractDataEntries,
-            strLastModifieds;
-
-        PGconn* conn = pg->conn_;
-        marshalToPGArray(conn, strContractIDs, mContractIDs);
-        marshalToPGArray(conn, strKeys, mKeys);
-        marshalToPGArray(conn, strTypes, mTypes);
-        marshalToPGArray(conn, strContractDataEntries, mContractDataEntries);
-        marshalToPGArray(conn, strLastModifieds, mLastModifieds);
-
-        std::string sql =
-            "WITH r AS "
-            "(SELECT unnest(:ids::TEXT[]), unnest(:v1::TEXT[]), "
-            "unnest(:v2::INT[]), unnest(:v3::TEXT[]), unnest(:v4::INT[])) "
-            "INSERT INTO contractdata "
-            "(contractid, key, type, ledgerentry, lastmodified) "
-            "SELECT * FROM r "
-            "ON CONFLICT (contractid,key,type) DO UPDATE SET "
-            "ledgerentry = excluded.ledgerentry, "
-            "lastmodified = excluded.lastmodified";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(strContractIDs));
-        st.exchange(soci::use(strKeys));
-        st.exchange(soci::use(strTypes));
-        st.exchange(soci::use(strContractDataEntries));
-        st.exchange(soci::use(strLastModifieds));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getUpsertTimer("contractdata");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mContractIDs.size())
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-#endif
-};
-
-void
-LedgerTxnRoot::Impl::bulkUpsertContractData(
-    std::vector<EntryIterator> const& entries)
-{
-    BulkUpsertContractDataOperation op(mApp.getDatabase(), entries);
-    mApp.getDatabase().doDatabaseTypeSpecificOperation(op);
-}
-
-void
-LedgerTxnRoot::Impl::dropContractData(bool rebuild)
-{
-    throwIfChild();
-    mEntryCache.clear();
-    mBestOffers.clear();
-
-    mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS contractdata;";
-
-    if (rebuild)
-    {
-        std::string coll = mApp.getDatabase().getSimpleCollationClause();
-        mApp.getDatabase().getSession()
-            << "CREATE TABLE contractdata ("
-            << "contractid   TEXT " << coll << " NOT NULL, "
-            << "key TEXT " << coll << " NOT NULL, "
-            << "type INT NOT NULL, "
-            << "ledgerentry  TEXT " << coll << " NOT NULL, "
-            << "lastmodified INT NOT NULL, "
-            << "PRIMARY KEY  (contractid, key, type));";
-        if (!mApp.getDatabase().isSqlite())
-        {
-            mApp.getDatabase().getSession() << "ALTER TABLE contractdata "
-                                            << "ALTER COLUMN contractid "
-                                            << "TYPE TEXT COLLATE \"C\","
-                                            << "ALTER COLUMN key "
-                                            << "TYPE TEXT COLLATE \"C\","
-                                            << "ALTER COLUMN type "
-                                            << "TYPE INT;";
-        }
-    }
-}
-
-}
\ No newline at end of file
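
The Postgres halves of these deleted operations marshal each column into a `TEXT[]`/`INT[]` array literal and expand it row-wise with `unnest()` inside a CTE, again so one statement covers any batch size. Below is a hedged libpq sketch of the same idea; the escaping is deliberately simplified (safe for the base64 payloads used above, not for arbitrary text), and it keys on `contractid` alone for brevity, unlike the real composite `(contractid, key, type)` key.

```cpp
// Sketch only: bulk-delete via a Postgres array parameter and unnest().
// Assumes values contain no quotes or backslashes (true for base64);
// a production marshaller must follow the full array-literal rules.
#include <libpq-fe.h>
#include <stdexcept>
#include <string>
#include <vector>

static std::string
toPGTextArray(std::vector<std::string> const& vals)
{
    std::string out = "{";
    for (size_t i = 0; i < vals.size(); ++i)
    {
        if (i != 0)
        {
            out += ",";
        }
        out += "\"" + vals[i] + "\"";
    }
    out += "}";
    return out;
}

void
bulkDeleteByContractID(PGconn* conn, std::vector<std::string> const& ids)
{
    std::string arr = toPGTextArray(ids);
    char const* params[1] = {arr.c_str()};
    // $1::TEXT[] expands to one row per element, like the
    // "WITH r AS (SELECT unnest(:v1::TEXT[]))" statements above.
    PGresult* res = PQexecParams(
        conn,
        "WITH r AS (SELECT unnest($1::TEXT[])) "
        "DELETE FROM contractdata WHERE contractid IN (SELECT * FROM r)",
        1, nullptr, params, nullptr, nullptr, 0);
    bool ok = PQresultStatus(res) == PGRES_COMMAND_OK;
    PQclear(res);
    if (!ok)
    {
        throw std::runtime_error("bulk delete failed");
    }
}
```
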
diff --git a/src/ledger/LedgerTxnDataSQL.cpp b/src/ledger/LedgerTxnDataSQL.cpp
deleted file mode 100644
index a17a38b208..0000000000
--- a/src/ledger/LedgerTxnDataSQL.cpp
+++ /dev/null
@@ -1,507 +0,0 @@
-// Copyright 2018 Stellar Development Foundation and contributors. Licensed
-// under the Apache License, Version 2.0. See the COPYING file at the root
-// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
-
-#include "crypto/KeyUtils.h"
-#include "crypto/SecretKey.h"
-#include "database/Database.h"
-#include "database/DatabaseTypeSpecificOperation.h"
-#include "ledger/LedgerTxnImpl.h"
-#include "ledger/LedgerTypeUtils.h"
-#include "main/Application.h"
-#include "util/Decoder.h"
-#include "util/GlobalChecks.h"
-#include "util/Logging.h"
-#include "util/types.h"
-#include <Tracy.hpp>
-
-namespace stellar
-{
-
-std::shared_ptr<LedgerEntry const>
-LedgerTxnRoot::Impl::loadData(LedgerKey const& key) const
-{
-    ZoneScoped;
-    std::string actIDStrKey = KeyUtils::toStrKey(key.data().accountID);
-    std::string dataName = decoder::encode_b64(key.data().dataName);
-
-    std::string dataValue;
-    soci::indicator dataValueIndicator;
-    std::string extensionStr;
-    soci::indicator extensionInd;
-    std::string ledgerExtStr;
-    soci::indicator ledgerExtInd;
-
-    LedgerEntry le;
-    le.data.type(DATA);
-    DataEntry& de = le.data.data();
-
-    std::string sql = "SELECT datavalue, lastmodified, extension, "
-                      "ledgerext "
-                      "FROM accountdata "
-                      "WHERE accountid= :id AND dataname= :dataname";
-    auto prep = mApp.getDatabase().getPreparedStatement(sql);
-    auto& st = prep.statement();
-    st.exchange(soci::into(dataValue, dataValueIndicator));
-    st.exchange(soci::into(le.lastModifiedLedgerSeq));
-    st.exchange(soci::into(extensionStr, extensionInd));
-    st.exchange(soci::into(ledgerExtStr, ledgerExtInd));
-    st.exchange(soci::use(actIDStrKey));
-    st.exchange(soci::use(dataName));
-    st.define_and_bind();
-    st.execute(true);
-    if (!st.got_data())
-    {
-        return nullptr;
-    }
-
-    de.accountID = key.data().accountID;
-    de.dataName = key.data().dataName;
-
-    if (dataValueIndicator != soci::i_ok)
-    {
-        throw std::runtime_error("bad database state");
-    }
-    decoder::decode_b64(dataValue, de.dataValue);
-
-    decodeOpaqueXDR(extensionStr, extensionInd, de.ext);
-
-    decodeOpaqueXDR(ledgerExtStr, ledgerExtInd, le.ext);
-
-    return std::make_shared<LedgerEntry const>(std::move(le));
-}
-
-class BulkUpsertDataOperation : public DatabaseTypeSpecificOperation<void>
-{
-    Database& mDB;
-    std::vector<std::string> mAccountIDs;
-    std::vector<std::string> mDataNames;
-    std::vector<std::string> mDataValues;
-    std::vector<int32_t> mLastModifieds;
-    std::vector<std::string> mExtensions;
-    std::vector<std::string> mLedgerExtensions;
-
-    void
-    accumulateEntry(LedgerEntry const& entry)
-    {
-        releaseAssert(entry.data.type() == DATA);
-        DataEntry const& data = entry.data.data();
-        mAccountIDs.emplace_back(KeyUtils::toStrKey(data.accountID));
-        mDataNames.emplace_back(decoder::encode_b64(data.dataName));
-        mDataValues.emplace_back(decoder::encode_b64(data.dataValue));
-        mLastModifieds.emplace_back(
-            unsignedToSigned(entry.lastModifiedLedgerSeq));
-        mExtensions.emplace_back(
-            decoder::encode_b64(xdr::xdr_to_opaque(data.ext)));
-        mLedgerExtensions.emplace_back(
-            decoder::encode_b64(xdr::xdr_to_opaque(entry.ext)));
-    }
-
-  public:
-    BulkUpsertDataOperation(Database& DB,
-                            std::vector<LedgerEntry> const& entries)
-        : mDB(DB)
-    {
-        for (auto const& e : entries)
-        {
-            accumulateEntry(e);
-        }
-    }
-
-    BulkUpsertDataOperation(Database& DB,
-                            std::vector<EntryIterator> const& entryIter)
-        : mDB(DB)
-    {
-        for (auto const& e : entryIter)
-        {
-            releaseAssert(e.entryExists());
-            releaseAssert(e.entry().type() ==
-                          InternalLedgerEntryType::LEDGER_ENTRY);
-            accumulateEntry(e.entry().ledgerEntry());
-        }
-    }
-
-    void
-    doSociGenericOperation()
-    {
-        std::string sql =
-            "INSERT INTO accountdata ( "
-            "accountid, dataname, datavalue, lastmodified, extension, "
-            "ledgerext "
-            ") VALUES ( "
-            ":id, :v1, :v2, :v3, :v4, :v5 "
-            ") ON CONFLICT (accountid, dataname) DO UPDATE SET "
-            "datavalue = excluded.datavalue, "
-            "lastmodified = excluded.lastmodified, "
-            "extension = excluded.extension, "
-            "ledgerext = excluded.ledgerext";
-        auto prep = mDB.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(mAccountIDs));
-        st.exchange(soci::use(mDataNames));
-        st.exchange(soci::use(mDataValues));
-        st.exchange(soci::use(mLastModifieds));
-        st.exchange(soci::use(mExtensions));
-        st.exchange(soci::use(mLedgerExtensions));
-        st.define_and_bind();
-        {
-            auto timer = mDB.getUpsertTimer("data");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mAccountIDs.size())
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-
-    void
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        doSociGenericOperation();
-    }
-#ifdef USE_POSTGRES
-    void
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        std::string strAccountIDs, strDataNames, strDataValues,
-            strLastModifieds, strExtensions, strLedgerExtensions;
-
-        PGconn* conn = pg->conn_;
-        marshalToPGArray(conn, strAccountIDs, mAccountIDs);
-        marshalToPGArray(conn, strDataNames, mDataNames);
-        marshalToPGArray(conn, strDataValues, mDataValues);
-        marshalToPGArray(conn, strLastModifieds, mLastModifieds);
-        marshalToPGArray(conn, strExtensions, mExtensions);
-        marshalToPGArray(conn, strLedgerExtensions, mLedgerExtensions);
-        std::string sql =
-            "WITH r AS (SELECT "
-            "unnest(:ids::TEXT[]), "
-            "unnest(:v1::TEXT[]), "
-            "unnest(:v2::TEXT[]), "
-            "unnest(:v3::INT[]), "
-            "unnest(:v4::TEXT[]), "
-            "unnest(:v5::TEXT[]) "
-            ")"
-            "INSERT INTO accountdata ( "
-            "accountid, dataname, datavalue, lastmodified, extension, "
-            "ledgerext "
-            ") SELECT * FROM r "
-            "ON CONFLICT (accountid, dataname) DO UPDATE SET "
-            "datavalue = excluded.datavalue, "
-            "lastmodified = excluded.lastmodified, "
-            "extension = excluded.extension, "
-            "ledgerext = excluded.ledgerext";
-        auto prep = mDB.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(strAccountIDs));
-        st.exchange(soci::use(strDataNames));
-        st.exchange(soci::use(strDataValues));
-        st.exchange(soci::use(strLastModifieds));
-        st.exchange(soci::use(strExtensions));
-        st.exchange(soci::use(strLedgerExtensions));
-        st.define_and_bind();
-        {
-            auto timer = mDB.getUpsertTimer("data");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mAccountIDs.size())
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-#endif
-};
-
-class BulkDeleteDataOperation : public DatabaseTypeSpecificOperation<void>
-{
-    Database& mDB;
-    LedgerTxnConsistency mCons;
-    std::vector<std::string> mAccountIDs;
-    std::vector<std::string> mDataNames;
-
-  public:
-    BulkDeleteDataOperation(Database& DB, LedgerTxnConsistency cons,
-                            std::vector<EntryIterator> const& entries)
-        : mDB(DB), mCons(cons)
-    {
-        for (auto const& e : entries)
-        {
-            releaseAssert(!e.entryExists());
-            releaseAssert(e.key().type() ==
-                          InternalLedgerEntryType::LEDGER_ENTRY);
-            releaseAssert(e.key().ledgerKey().type() == DATA);
-            auto const& data = e.key().ledgerKey().data();
-            mAccountIDs.emplace_back(KeyUtils::toStrKey(data.accountID));
-            mDataNames.emplace_back(decoder::encode_b64(data.dataName));
-        }
-    }
-
-    void
-    doSociGenericOperation()
-    {
-        std::string sql = "DELETE FROM accountdata WHERE accountid = :id AND "
-                          " dataname = :v1 ";
-        auto prep = mDB.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(mAccountIDs));
-        st.exchange(soci::use(mDataNames));
-        st.define_and_bind();
-        {
-            auto timer = mDB.getDeleteTimer("data");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mAccountIDs.size() &&
-            mCons == LedgerTxnConsistency::EXACT)
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-
-    void
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        doSociGenericOperation();
-    }
-
-#ifdef USE_POSTGRES
-    void
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        std::string strAccountIDs;
-        std::string strDataNames;
-        PGconn* conn = pg->conn_;
-        marshalToPGArray(conn, strAccountIDs, mAccountIDs);
-        marshalToPGArray(conn, strDataNames, mDataNames);
-        std::string sql =
-            "WITH r AS ( SELECT "
-            "unnest(:ids::TEXT[]),"
-            "unnest(:v1::TEXT[])"
-            " ) "
-            "DELETE FROM accountdata WHERE (accountid, dataname) IN "
-            "(SELECT * FROM r)";
-        auto prep = mDB.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(strAccountIDs));
-        st.exchange(soci::use(strDataNames));
-        st.define_and_bind();
-        {
-            auto timer = mDB.getDeleteTimer("data");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mAccountIDs.size() &&
-            mCons == LedgerTxnConsistency::EXACT)
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-#endif
-};
-
-void
-LedgerTxnRoot::Impl::bulkUpsertAccountData(
-    std::vector<EntryIterator> const& entries)
-{
-    ZoneScoped;
-    ZoneValue(static_cast<int64_t>(entries.size()));
-    BulkUpsertDataOperation op(mApp.getDatabase(), entries);
-    mApp.getDatabase().doDatabaseTypeSpecificOperation(op);
-}
-
-void
-LedgerTxnRoot::Impl::bulkDeleteAccountData(
-    std::vector<EntryIterator> const& entries, LedgerTxnConsistency cons)
-{
-    ZoneScoped;
-    ZoneValue(static_cast<int64_t>(entries.size()));
-    BulkDeleteDataOperation op(mApp.getDatabase(), cons, entries);
-    mApp.getDatabase().doDatabaseTypeSpecificOperation(op);
-}
-
-void
-LedgerTxnRoot::Impl::dropData(bool rebuild)
-{
-    throwIfChild();
-    mEntryCache.clear();
-    mBestOffers.clear();
-
-    mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS accountdata;";
-
-    if (rebuild)
-    {
-        std::string coll = mApp.getDatabase().getSimpleCollationClause();
-        mApp.getDatabase().getSession()
-            << "CREATE TABLE accountdata"
-            << "("
-            << "accountid    VARCHAR(56) " << coll << " NOT NULL,"
-            << "dataname     VARCHAR(88) " << coll << " NOT NULL,"
-            << "datavalue    VARCHAR(112) NOT NULL,"
-               "lastmodified INT          NOT NULL,"
-               "extension    TEXT,"
-               "ledgerext    TEXT         NOT NULL,"
-               "PRIMARY KEY  (accountid, dataname)"
-               ");";
-        if (!mApp.getDatabase().isSqlite())
-        {
-            mApp.getDatabase().getSession()
-                << "ALTER TABLE accountdata "
-                << "ALTER COLUMN accountid "
-                << "TYPE VARCHAR(56) COLLATE \"C\", "
-                << "ALTER COLUMN dataname "
-                << "TYPE VARCHAR(88) COLLATE \"C\"";
-        }
-    }
-}
-
-class BulkLoadDataOperation
-    : public DatabaseTypeSpecificOperation<std::vector<LedgerEntry>>
-{
-    Database& mDb;
-    std::vector<std::string> mAccountIDs;
-    std::vector<std::string> mDataNames;
-
-    std::vector<LedgerEntry>
-    executeAndFetch(soci::statement& st)
-    {
-        std::string accountID, dataName, dataValue;
-        uint32_t lastModified;
-        std::string extension;
-        soci::indicator extensionInd;
-        std::string ledgerExtension;
-        soci::indicator ledgerExtInd;
-
-        st.exchange(soci::into(accountID));
-        st.exchange(soci::into(dataName));
-        st.exchange(soci::into(dataValue));
-        st.exchange(soci::into(lastModified));
-        st.exchange(soci::into(extension, extensionInd));
-        st.exchange(soci::into(ledgerExtension, ledgerExtInd));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getSelectTimer("data");
-            st.execute(true);
-        }
-
-        std::vector<LedgerEntry> res;
-        while (st.got_data())
-        {
-            res.emplace_back();
-            auto& le = res.back();
-            le.data.type(DATA);
-            auto& de = le.data.data();
-
-            de.accountID = KeyUtils::fromStrKey<PublicKey>(accountID);
-            decoder::decode_b64(dataName, de.dataName);
-            decoder::decode_b64(dataValue, de.dataValue);
-            le.lastModifiedLedgerSeq = lastModified;
-
-            decodeOpaqueXDR(extension, extensionInd, de.ext);
-
-            decodeOpaqueXDR(ledgerExtension, ledgerExtInd, le.ext);
-
-            st.fetch();
-        }
-        return res;
-    }
-
-  public:
-    BulkLoadDataOperation(Database& db, UnorderedSet<LedgerKey> const& keys)
-        : mDb(db)
-    {
-        mAccountIDs.reserve(keys.size());
-        mDataNames.reserve(keys.size());
-        for (auto const& k : keys)
-        {
-            releaseAssert(k.type() == DATA);
-            mAccountIDs.emplace_back(KeyUtils::toStrKey(k.data().accountID));
-            mDataNames.emplace_back(decoder::encode_b64(k.data().dataName));
-        }
-    }
-
-    virtual std::vector<LedgerEntry>
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        releaseAssert(mAccountIDs.size() == mDataNames.size());
-
-        std::vector<char const*> cstrAccountIDs;
-        std::vector<char const*> cstrDataNames;
-        cstrAccountIDs.reserve(mAccountIDs.size());
-        cstrDataNames.reserve(mDataNames.size());
-        for (size_t i = 0; i < mAccountIDs.size(); ++i)
-        {
-            cstrAccountIDs.emplace_back(mAccountIDs[i].c_str());
-            cstrDataNames.emplace_back(mDataNames[i].c_str());
-        }
-
-        std::string sqlJoin =
-            "SELECT x.value, y.value FROM "
-            "(SELECT rowid, value FROM carray(?, ?, 'char*') ORDER BY rowid) "
-            "AS x "
-            "INNER JOIN (SELECT rowid, value FROM carray(?, ?, 'char*') ORDER "
-            "BY rowid) AS y ON x.rowid = y.rowid";
-        std::string sql = "WITH r AS (" + sqlJoin +
-                          ") SELECT accountid, dataname, datavalue, "
-                          "lastmodified, extension, "
-                          "ledgerext "
-                          "FROM accountdata WHERE (accountid, dataname) IN r";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        auto be = prep.statement().get_backend();
-        if (be == nullptr)
-        {
-            throw std::runtime_error("no sql backend");
-        }
-        auto sqliteStatement =
-            dynamic_cast<soci::sqlite3_statement_backend*>(be);
-        auto st = sqliteStatement->stmt_;
-
-        sqlite3_reset(st);
-        sqlite3_bind_pointer(st, 1, cstrAccountIDs.data(), "carray", 0);
-        sqlite3_bind_int(st, 2, static_cast<int>(cstrAccountIDs.size()));
-        sqlite3_bind_pointer(st, 3, cstrDataNames.data(), "carray", 0);
-        sqlite3_bind_int(st, 4, static_cast<int>(cstrDataNames.size()));
-        return executeAndFetch(prep.statement());
-    }
-
-#ifdef USE_POSTGRES
-    std::vector<LedgerEntry>
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        releaseAssert(mAccountIDs.size() == mDataNames.size());
-
-        std::string strAccountIDs;
-        std::string strDataNames;
-        marshalToPGArray(pg->conn_, strAccountIDs, mAccountIDs);
-        marshalToPGArray(pg->conn_, strDataNames, mDataNames);
-
-        std::string sql =
-            "WITH r AS (SELECT unnest(:v1::TEXT[]), unnest(:v2::TEXT[])) "
-            "SELECT accountid, dataname, datavalue, lastmodified, extension, "
-            "ledgerext "
-            "FROM accountdata WHERE (accountid, dataname) IN (SELECT * FROM r)";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        auto& st = prep.statement();
-        st.exchange(soci::use(strAccountIDs));
-        st.exchange(soci::use(strDataNames));
-        return executeAndFetch(st);
-    }
-#endif
-};
-
-UnorderedMap<LedgerKey, std::shared_ptr<LedgerEntry const>>
-LedgerTxnRoot::Impl::bulkLoadData(UnorderedSet<LedgerKey> const& keys) const
-{
-    ZoneScoped;
-    ZoneValue(static_cast<int64_t>(keys.size()));
-    if (!keys.empty())
-    {
-        BulkLoadDataOperation op(mApp.getDatabase(), keys);
-        return populateLoadedEntries(
-            keys, mApp.getDatabase().doDatabaseTypeSpecificOperation(op));
-    }
-    else
-    {
-        return {};
-    }
-}
-}
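
The `doSociGenericOperation()` bodies in these deleted files could be shared between backends because both SQLite (3.24+) and Postgres accept the same `INSERT ... ON CONFLICT ... DO UPDATE` upsert syntax, and soci's vector binding expands one prepared statement across the whole batch. A minimal sketch of that shared path follows, against a placeholder single-key `kv` schema rather than the real `accountdata` composite key.

```cpp
// Sketch only: soci vector-bound bulk upsert shared by SQLite and Postgres.
// "kv" and its columns are placeholders; kv.id is assumed to be the
// primary key so ON CONFLICT has a constraint to target.
#include <soci/soci.h>
#include <stdexcept>
#include <string>
#include <vector>

void
bulkUpsert(soci::session& sess, std::vector<std::string>& ids,
           std::vector<std::string>& values, std::vector<int>& lastMods)
{
    soci::statement st =
        (sess.prepare << "INSERT INTO kv (id, val, lastmodified) "
                         "VALUES (:id, :v1, :v2) "
                         "ON CONFLICT (id) DO UPDATE SET "
                         "val = excluded.val, "
                         "lastmodified = excluded.lastmodified",
         soci::use(ids), soci::use(values), soci::use(lastMods));

    // One execute() round trip; soci walks the vectors row by row.
    st.execute(true);
    if (static_cast<size_t>(st.get_affected_rows()) != ids.size())
    {
        throw std::runtime_error("unexpected affected-row count");
    }
}
```
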
diff --git a/src/ledger/LedgerTxnImpl.h b/src/ledger/LedgerTxnImpl.h
index 4d71595f70..ad13c29807 100644
--- a/src/ledger/LedgerTxnImpl.h
+++ b/src/ledger/LedgerTxnImpl.h
@@ -20,7 +20,7 @@
 namespace stellar
 {
 
-class SearchableBucketListSnapshot;
+class SearchableLiveBucketListSnapshot;
 
 class EntryIterator::AbstractImpl
 {
@@ -54,52 +54,10 @@ class EntryIterator::AbstractImpl
 // reorganizing the relevant parts of soci.
 class BulkLedgerEntryChangeAccumulator
 {
-
-    std::vector<EntryIterator> mAccountsToUpsert;
-    std::vector<EntryIterator> mAccountsToDelete;
-    std::vector<EntryIterator> mAccountDataToUpsert;
-    std::vector<EntryIterator> mAccountDataToDelete;
-    std::vector<EntryIterator> mClaimableBalanceToUpsert;
-    std::vector<EntryIterator> mClaimableBalanceToDelete;
     std::vector<EntryIterator> mOffersToUpsert;
     std::vector<EntryIterator> mOffersToDelete;
-    std::vector<EntryIterator> mTrustLinesToUpsert;
-    std::vector<EntryIterator> mTrustLinesToDelete;
-    std::vector<EntryIterator> mLiquidityPoolToUpsert;
-    std::vector<EntryIterator> mLiquidityPoolToDelete;
-    std::vector<EntryIterator> mContractDataToUpsert;
-    std::vector<EntryIterator> mContractDataToDelete;
-    std::vector<EntryIterator> mContractCodeToUpsert;
-    std::vector<EntryIterator> mContractCodeToDelete;
-    std::vector<EntryIterator> mConfigSettingsToUpsert;
-    std::vector<EntryIterator> mTTLToUpsert;
-    std::vector<EntryIterator> mTTLToDelete;
 
   public:
-    std::vector<EntryIterator>&
-    getAccountsToUpsert()
-    {
-        return mAccountsToUpsert;
-    }
-
-    std::vector<EntryIterator>&
-    getAccountsToDelete()
-    {
-        return mAccountsToDelete;
-    }
-
-    std::vector<EntryIterator>&
-    getTrustLinesToUpsert()
-    {
-        return mTrustLinesToUpsert;
-    }
-
-    std::vector<EntryIterator>&
-    getTrustLinesToDelete()
-    {
-        return mTrustLinesToDelete;
-    }
-
     std::vector<EntryIterator>&
     getOffersToUpsert()
     {
@@ -112,85 +70,7 @@ class BulkLedgerEntryChangeAccumulator
         return mOffersToDelete;
     }
 
-    std::vector<EntryIterator>&
-    getAccountDataToUpsert()
-    {
-        return mAccountDataToUpsert;
-    }
-
-    std::vector<EntryIterator>&
-    getAccountDataToDelete()
-    {
-        return mAccountDataToDelete;
-    }
-
-    std::vector<EntryIterator>&
-    getClaimableBalanceToUpsert()
-    {
-        return mClaimableBalanceToUpsert;
-    }
-
-    std::vector<EntryIterator>&
-    getClaimableBalanceToDelete()
-    {
-        return mClaimableBalanceToDelete;
-    }
-
-    std::vector<EntryIterator>&
-    getLiquidityPoolToUpsert()
-    {
-        return mLiquidityPoolToUpsert;
-    }
-
-    std::vector<EntryIterator>&
-    getLiquidityPoolToDelete()
-    {
-        return mLiquidityPoolToDelete;
-    }
-
-    std::vector<EntryIterator>&
-    getConfigSettingsToUpsert()
-    {
-        return mConfigSettingsToUpsert;
-    }
-
-    std::vector<EntryIterator>&
-    getContractDataToUpsert()
-    {
-        return mContractDataToUpsert;
-    }
-
-    std::vector<EntryIterator>&
-    getContractDataToDelete()
-    {
-        return mContractDataToDelete;
-    }
-
-    std::vector<EntryIterator>&
-    getContractCodeToUpsert()
-    {
-        return mContractCodeToUpsert;
-    }
-
-    std::vector<EntryIterator>&
-    getContractCodeToDelete()
-    {
-        return mContractCodeToDelete;
-    }
-
-    std::vector<EntryIterator>&
-    getTTLToUpsert()
-    {
-        return mTTLToUpsert;
-    }
-
-    std::vector<EntryIterator>&
-    getTTLToDelete()
-    {
-        return mTTLToDelete;
-    }
-
-    bool accumulate(EntryIterator const& iter, bool bucketListDBEnabled);
+    bool accumulate(EntryIterator const& iter);
 };
 
 // Many functions in LedgerTxn::Impl provide a basic exception safety
@@ -737,7 +617,7 @@ class LedgerTxnRoot::Impl
     mutable BestOffers mBestOffers;
     mutable uint64_t mPrefetchHits{0};
     mutable uint64_t mPrefetchMisses{0};
-    mutable std::shared_ptr<SearchableBucketListSnapshot>
+    mutable std::shared_ptr<SearchableLiveBucketListSnapshot>
         mSearchableBucketListSnapshot{};
 
     size_t mBulkLoadBatchSize;
@@ -750,8 +630,6 @@ class LedgerTxnRoot::Impl
 
     void throwIfChild() const;
 
-    std::shared_ptr<LedgerEntry const> loadAccount(LedgerKey const& key) const;
-    std::shared_ptr<LedgerEntry const> loadData(LedgerKey const& key) const;
     std::shared_ptr<LedgerEntry const> loadOffer(LedgerKey const& key) const;
     std::vector<LedgerEntry> loadAllOffers() const;
     std::deque<LedgerEntry>::const_iterator
@@ -767,55 +645,12 @@ class LedgerTxnRoot::Impl
     loadOffersByAccountAndAsset(AccountID const& accountID,
                                 Asset const& asset) const;
     std::vector<LedgerEntry> loadOffers(StatementContext& prep) const;
-    std::vector<InflationWinner> loadInflationWinners(size_t maxWinners,
-                                                      int64_t minBalance) const;
-    std::shared_ptr<LedgerEntry const>
-    loadTrustLine(LedgerKey const& key) const;
-    std::vector<LedgerEntry>
-    loadPoolShareTrustLinesByAccountAndAsset(AccountID const& accountID,
-                                             Asset const& asset) const;
-    std::shared_ptr<LedgerEntry const>
-    loadClaimableBalance(LedgerKey const& key) const;
-    std::shared_ptr<LedgerEntry const>
-    loadLiquidityPool(LedgerKey const& key) const;
-    std::shared_ptr<LedgerEntry const>
-    loadContractData(LedgerKey const& key) const;
-    std::shared_ptr<LedgerEntry const>
-    loadContractCode(LedgerKey const& key) const;
-    std::shared_ptr<LedgerEntry const>
-    loadConfigSetting(LedgerKey const& key) const;
-    std::shared_ptr<LedgerEntry const> loadTTL(LedgerKey const& key) const;
 
     void bulkApply(BulkLedgerEntryChangeAccumulator& bleca,
                    size_t bufferThreshold, LedgerTxnConsistency cons);
-    void bulkUpsertAccounts(std::vector<EntryIterator> const& entries);
-    void bulkDeleteAccounts(std::vector<EntryIterator> const& entries,
-                            LedgerTxnConsistency cons);
-    void bulkUpsertTrustLines(std::vector<EntryIterator> const& entries);
-    void bulkDeleteTrustLines(std::vector<EntryIterator> const& entries,
-                              LedgerTxnConsistency cons);
     void bulkUpsertOffers(std::vector<EntryIterator> const& entries);
     void bulkDeleteOffers(std::vector<EntryIterator> const& entries,
                           LedgerTxnConsistency cons);
-    void bulkUpsertAccountData(std::vector<EntryIterator> const& entries);
-    void bulkDeleteAccountData(std::vector<EntryIterator> const& entries,
-                               LedgerTxnConsistency cons);
-    void bulkUpsertClaimableBalance(std::vector<EntryIterator> const& entries);
-    void bulkDeleteClaimableBalance(std::vector<EntryIterator> const& entries,
-                                    LedgerTxnConsistency cons);
-    void bulkUpsertLiquidityPool(std::vector<EntryIterator> const& entries);
-    void bulkDeleteLiquidityPool(std::vector<EntryIterator> const& entries,
-                                 LedgerTxnConsistency cons);
-    void bulkUpsertContractData(std::vector<EntryIterator> const& entries);
-    void bulkDeleteContractData(std::vector<EntryIterator> const& entries,
-                                LedgerTxnConsistency cons);
-    void bulkUpsertContractCode(std::vector<EntryIterator> const& entries);
-    void bulkDeleteContractCode(std::vector<EntryIterator> const& entries,
-                                LedgerTxnConsistency cons);
-    void bulkUpsertConfigSettings(std::vector<EntryIterator> const& entries);
-    void bulkUpsertTTL(std::vector<EntryIterator> const& entries);
-    void bulkDeleteTTL(std::vector<EntryIterator> const& entries,
-                       LedgerTxnConsistency cons);
 
     static std::string tableFromLedgerEntryType(LedgerEntryType let);
 
@@ -841,27 +676,8 @@ class LedgerTxnRoot::Impl
     BestOffersEntryPtr getFromBestOffers(Asset const& buying,
                                          Asset const& selling) const;
 
-    UnorderedMap<LedgerKey, std::shared_ptr<LedgerEntry const>>
-    bulkLoadAccounts(UnorderedSet<LedgerKey> const& keys) const;
-    UnorderedMap<LedgerKey, std::shared_ptr<LedgerEntry const>>
-    bulkLoadTrustLines(UnorderedSet<LedgerKey> const& keys) const;
     UnorderedMap<LedgerKey, std::shared_ptr<LedgerEntry const>>
     bulkLoadOffers(UnorderedSet<LedgerKey> const& keys) const;
-    UnorderedMap<LedgerKey, std::shared_ptr<LedgerEntry const>>
-    bulkLoadData(UnorderedSet<LedgerKey> const& keys) const;
-    UnorderedMap<LedgerKey, std::shared_ptr<LedgerEntry const>>
-    bulkLoadClaimableBalance(UnorderedSet<LedgerKey> const& keys) const;
-    UnorderedMap<LedgerKey, std::shared_ptr<LedgerEntry const>>
-    bulkLoadLiquidityPool(UnorderedSet<LedgerKey> const& keys) const;
-    UnorderedMap<LedgerKey, std::shared_ptr<LedgerEntry const>>
-    bulkLoadContractData(UnorderedSet<LedgerKey> const& keys) const;
-    UnorderedMap<LedgerKey, std::shared_ptr<LedgerEntry const>>
-    bulkLoadContractCode(UnorderedSet<LedgerKey> const& keys) const;
-    UnorderedMap<LedgerKey, std::shared_ptr<LedgerEntry const>>
-    bulkLoadConfigSettings(UnorderedSet<LedgerKey> const& keys) const;
-    UnorderedMap<LedgerKey, std::shared_ptr<LedgerEntry const>>
-    bulkLoadTTL(UnorderedSet<LedgerKey> const& keys) const;
-
     std::deque<LedgerEntry>::const_iterator
     loadNextBestOffersIntoCache(BestOffersEntryPtr cached, Asset const& buying,
                                 Asset const& selling);
@@ -871,7 +687,8 @@ class LedgerTxnRoot::Impl
 
     bool areEntriesMissingInCacheForOffer(OfferEntry const& oe);
 
-    SearchableBucketListSnapshot& getSearchableBucketListSnapshot() const;
+    SearchableLiveBucketListSnapshot&
+    getSearchableLiveBucketListSnapshot() const;
 
     uint32_t prefetchInternal(UnorderedSet<LedgerKey> const& keys,
                               LedgerKeyMeter* lkMeter = nullptr);
@@ -892,26 +709,14 @@ class LedgerTxnRoot::Impl
 
     void commitChild(EntryIterator iter, LedgerTxnConsistency cons) noexcept;
 
-    // countObjects has the strong exception safety guarantee.
-    uint64_t countObjects(LedgerEntryType let) const;
-    uint64_t countObjects(LedgerEntryType let,
-                          LedgerRange const& ledgers) const;
+    // countOffers has the strong exception safety guarantee.
+    uint64_t countOffers(LedgerRange const& ledgers) const;
 
-    // deleteObjectsModifiedOnOrAfterLedger has no exception safety guarantees.
-    void deleteObjectsModifiedOnOrAfterLedger(uint32_t ledger) const;
+    // deleteOffersModifiedOnOrAfterLedger has no exception safety guarantees.
+    void deleteOffersModifiedOnOrAfterLedger(uint32_t ledger) const;
 
-    // dropAccounts, dropData, dropOffers, and dropTrustLines have no exception
-    // safety guarantees.
-    void dropAccounts(bool rebuild);
-    void dropData(bool rebuild);
+    // dropOffers has no exception safety guarantees.
     void dropOffers(bool rebuild);
-    void dropTrustLines(bool rebuild);
-    void dropClaimableBalances(bool rebuild);
-    void dropLiquidityPools(bool rebuild);
-    void dropContractData(bool rebuild);
-    void dropContractCode(bool rebuild);
-    void dropConfigSettings(bool rebuild);
-    void dropTTL(bool rebuild);
 
 #ifdef BUILD_TESTS
     void resetForFuzzer();
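
With every non-offer table removed, the accumulator's `accumulate(EntryIterator const&)` only has offers left to route. The new body is not shown in this hunk, so the following is a hypothetical sketch, not the actual implementation, of what the slimmed-down method plausibly reduces to, using only the `EntryIterator` calls visible in the deleted code above.

```cpp
// Hypothetical sketch of the slimmed accumulate(); the real body is not
// shown in this diff. Buffers OFFER changes and reports whether the
// entry was taken, everything else now being served by the BucketList.
bool
BulkLedgerEntryChangeAccumulator::accumulate(EntryIterator const& iter)
{
    if (iter.key().type() != InternalLedgerEntryType::LEDGER_ENTRY ||
        iter.key().ledgerKey().type() != OFFER)
    {
        return false; // no SQL table for this entry type anymore
    }
    if (iter.entryExists())
    {
        mOffersToUpsert.emplace_back(iter);
    }
    else
    {
        mOffersToDelete.emplace_back(iter);
    }
    return true;
}
```
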
diff --git a/src/ledger/LedgerTxnLiquidityPoolSQL.cpp b/src/ledger/LedgerTxnLiquidityPoolSQL.cpp
deleted file mode 100644
index ce8289b284..0000000000
--- a/src/ledger/LedgerTxnLiquidityPoolSQL.cpp
+++ /dev/null
@@ -1,419 +0,0 @@
-// Copyright 2020 Stellar Development Foundation and contributors. Licensed
-// under the Apache License, Version 2.0. See the COPYING file at the root
-// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
-
-#include "ledger/LedgerTxnImpl.h"
-#include "ledger/LedgerTypeUtils.h"
-#include "ledger/NonSociRelatedException.h"
-#include "main/Application.h"
-#include "util/GlobalChecks.h"
-#include "util/types.h"
-
-namespace stellar
-{
-
-static void
-throwIfNotLiquidityPool(LedgerEntryType type)
-{
-    if (type != LIQUIDITY_POOL)
-    {
-        throw NonSociRelatedException("LedgerEntry is not a LIQUIDITY_POOL");
-    }
-}
-
-static std::string
-getPrimaryKey(PoolID const& poolID)
-{
-    TrustLineAsset tla(ASSET_TYPE_POOL_SHARE);
-    tla.liquidityPoolID() = poolID;
-    return toOpaqueBase64(tla);
-}
-
-std::shared_ptr<LedgerEntry const>
-LedgerTxnRoot::Impl::loadLiquidityPool(LedgerKey const& key) const
-{
-    auto poolAsset = getPrimaryKey(key.liquidityPool().liquidityPoolID);
-
-    std::string liquidityPoolEntryStr;
-
-    std::string sql = "SELECT ledgerentry "
-                      "FROM liquiditypool "
-                      "WHERE poolasset= :poolasset";
-    auto prep = mApp.getDatabase().getPreparedStatement(sql);
-    auto& st = prep.statement();
-    st.exchange(soci::into(liquidityPoolEntryStr));
-    st.exchange(soci::use(poolAsset));
-    st.define_and_bind();
-    {
-        auto timer = mApp.getDatabase().getSelectTimer("liquiditypool");
-        st.execute(true);
-    }
-    if (!st.got_data())
-    {
-        return nullptr;
-    }
-
-    LedgerEntry le;
-    fromOpaqueBase64(le, liquidityPoolEntryStr);
-    throwIfNotLiquidityPool(le.data.type());
-
-    return std::make_shared<LedgerEntry const>(std::move(le));
-}
-
-class BulkLoadLiquidityPoolOperation
-    : public DatabaseTypeSpecificOperation<std::vector<LedgerEntry>>
-{
-    Database& mDb;
-    std::vector<std::string> mPoolAssets;
-
-    std::vector<LedgerEntry>
-    executeAndFetch(soci::statement& st)
-    {
-        std::string liquidityPoolEntryStr;
-
-        st.exchange(soci::into(liquidityPoolEntryStr));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getSelectTimer("liquiditypool");
-            st.execute(true);
-        }
-
-        std::vector<LedgerEntry> res;
-        while (st.got_data())
-        {
-            res.emplace_back();
-            auto& le = res.back();
-
-            fromOpaqueBase64(le, liquidityPoolEntryStr);
-            throwIfNotLiquidityPool(le.data.type());
-
-            st.fetch();
-        }
-        return res;
-    }
-
-  public:
-    BulkLoadLiquidityPoolOperation(Database& db,
-                                   UnorderedSet<LedgerKey> const& keys)
-        : mDb(db)
-    {
-        mPoolAssets.reserve(keys.size());
-        for (auto const& k : keys)
-        {
-            throwIfNotLiquidityPool(k.type());
-            mPoolAssets.emplace_back(
-                getPrimaryKey(k.liquidityPool().liquidityPoolID));
-        }
-    }
-
-    std::vector<LedgerEntry>
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        std::vector<char const*> cstrPoolAssets;
-        cstrPoolAssets.reserve(mPoolAssets.size());
-        for (size_t i = 0; i < mPoolAssets.size(); ++i)
-        {
-            cstrPoolAssets.emplace_back(mPoolAssets[i].c_str());
-        }
-
-        std::string sql = "WITH r AS (SELECT value FROM carray(?, ?, 'char*')) "
-                          "SELECT ledgerentry "
-                          "FROM liquiditypool "
-                          "WHERE poolasset IN r";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        auto be = prep.statement().get_backend();
-        if (be == nullptr)
-        {
-            throw std::runtime_error("no sql backend");
-        }
-        auto sqliteStatement =
-            dynamic_cast<soci::sqlite3_statement_backend*>(be);
-        auto st = sqliteStatement->stmt_;
-
-        sqlite3_reset(st);
-        sqlite3_bind_pointer(st, 1, cstrPoolAssets.data(), "carray", 0);
-        sqlite3_bind_int(st, 2, static_cast<int>(cstrPoolAssets.size()));
-        return executeAndFetch(prep.statement());
-    }
-
-#ifdef USE_POSTGRES
-    std::vector<LedgerEntry>
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        std::string strPoolAssets;
-        marshalToPGArray(pg->conn_, strPoolAssets, mPoolAssets);
-
-        std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) "
-                          "SELECT ledgerentry "
-                          "FROM liquiditypool "
-                          "WHERE poolasset IN (SELECT * from r)";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        auto& st = prep.statement();
-        st.exchange(soci::use(strPoolAssets));
-        return executeAndFetch(st);
-    }
-#endif
-};
-
-UnorderedMap<LedgerKey, std::shared_ptr<LedgerEntry const>>
-LedgerTxnRoot::Impl::bulkLoadLiquidityPool(
-    UnorderedSet<LedgerKey> const& keys) const
-{
-    if (!keys.empty())
-    {
-        BulkLoadLiquidityPoolOperation op(mApp.getDatabase(), keys);
-        return populateLoadedEntries(
-            keys, mApp.getDatabase().doDatabaseTypeSpecificOperation(op));
-    }
-    else
-    {
-        return {};
-    }
-}
-
-class BulkDeleteLiquidityPoolOperation
-    : public DatabaseTypeSpecificOperation<void>
-{
-    Database& mDb;
-    LedgerTxnConsistency mCons;
-    std::vector<std::string> mPoolAssets;
-
-  public:
-    BulkDeleteLiquidityPoolOperation(Database& db, LedgerTxnConsistency cons,
-                                     std::vector<EntryIterator> const& entries)
-        : mDb(db), mCons(cons)
-    {
-        mPoolAssets.reserve(entries.size());
-        for (auto const& e : entries)
-        {
-            releaseAssert(!e.entryExists());
-            throwIfNotLiquidityPool(e.key().ledgerKey().type());
-            mPoolAssets.emplace_back(getPrimaryKey(
-                e.key().ledgerKey().liquidityPool().liquidityPoolID));
-        }
-    }
-
-    void
-    doSociGenericOperation()
-    {
-        std::string sql = "DELETE FROM liquiditypool WHERE poolasset = :id";
-        auto prep = mDb.getPreparedStatement(sql);
-        auto& st = prep.statement();
-        st.exchange(soci::use(mPoolAssets));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getDeleteTimer("liquiditypool");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mPoolAssets.size() &&
-            mCons == LedgerTxnConsistency::EXACT)
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-
-    void
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        doSociGenericOperation();
-    }
-
-#ifdef USE_POSTGRES
-    void
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        std::string strPoolAssets;
-        marshalToPGArray(pg->conn_, strPoolAssets, mPoolAssets);
-
-        std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) "
-                          "DELETE FROM liquiditypool "
-                          "WHERE poolasset IN (SELECT * FROM r)";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        auto& st = prep.statement();
-        st.exchange(soci::use(strPoolAssets));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getDeleteTimer("liquiditypool");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mPoolAssets.size() &&
-            mCons == LedgerTxnConsistency::EXACT)
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-#endif
-};
-
-void
-LedgerTxnRoot::Impl::bulkDeleteLiquidityPool(
-    std::vector<EntryIterator> const& entries, LedgerTxnConsistency cons)
-{
-    BulkDeleteLiquidityPoolOperation op(mApp.getDatabase(), cons, entries);
-    mApp.getDatabase().doDatabaseTypeSpecificOperation(op);
-}
-
-class BulkUpsertLiquidityPoolOperation
-    : public DatabaseTypeSpecificOperation<void>
-{
-    Database& mDb;
-    std::vector<std::string> mPoolAssets;
-    std::vector<std::string> mAssetAs;
-    std::vector<std::string> mAssetBs;
-    std::vector<std::string> mLiquidityPoolEntries;
-    std::vector<int32_t> mLastModifieds;
-
-    void
-    accumulateEntry(LedgerEntry const& entry)
-    {
-        throwIfNotLiquidityPool(entry.data.type());
-
-        auto const& lp = entry.data.liquidityPool();
-        auto const& cp = lp.body.constantProduct();
-        mPoolAssets.emplace_back(getPrimaryKey(lp.liquidityPoolID));
-        mAssetAs.emplace_back(toOpaqueBase64(cp.params.assetA));
-        mAssetBs.emplace_back(toOpaqueBase64(cp.params.assetB));
-        mLiquidityPoolEntries.emplace_back(toOpaqueBase64(entry));
-        mLastModifieds.emplace_back(
-            unsignedToSigned(entry.lastModifiedLedgerSeq));
-    }
-
-  public:
-    BulkUpsertLiquidityPoolOperation(
-        Database& Db, std::vector<EntryIterator> const& entryIter)
-        : mDb(Db)
-    {
-        for (auto const& e : entryIter)
-        {
-            releaseAssert(e.entryExists());
-            accumulateEntry(e.entry().ledgerEntry());
-        }
-    }
-
-    void
-    doSociGenericOperation()
-    {
-        std::string sql =
-            "INSERT INTO liquiditypool "
-            "(poolasset, asseta, assetb, ledgerentry, lastmodified) "
-            "VALUES "
-            "( :id, :v1, :v2, :v3, :v4 ) "
-            "ON CONFLICT (poolasset) DO UPDATE SET "
-            "asseta = excluded.asseta, "
-            "assetb = excluded.assetb, "
-            "ledgerentry = excluded.ledgerentry, "
-            "lastmodified = excluded.lastmodified";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(mPoolAssets));
-        st.exchange(soci::use(mAssetAs));
-        st.exchange(soci::use(mAssetBs));
-        st.exchange(soci::use(mLiquidityPoolEntries));
-        st.exchange(soci::use(mLastModifieds));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getUpsertTimer("liquiditypool");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mPoolAssets.size())
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-
-    void
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        doSociGenericOperation();
-    }
-
-#ifdef USE_POSTGRES
-    void
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        std::string strPoolAssets, strAssetAs, strAssetBs,
-            strLiquidityPoolEntry, strLastModifieds;
-
-        PGconn* conn = pg->conn_;
-        marshalToPGArray(conn, strPoolAssets, mPoolAssets);
-        marshalToPGArray(conn, strAssetAs, mAssetAs);
-        marshalToPGArray(conn, strAssetBs, mAssetBs);
-        marshalToPGArray(conn, strLiquidityPoolEntry, mLiquidityPoolEntries);
-        marshalToPGArray(conn, strLastModifieds, mLastModifieds);
-
-        std::string sql =
-            "WITH r AS "
-            "(SELECT unnest(:ids::TEXT[]), unnest(:v1::TEXT[]), "
-            "unnest(:v2::TEXT[]), unnest(:v3::TEXT[]), "
-            "unnest(:v4::INT[])) "
-            "INSERT INTO liquiditypool "
-            "(poolasset, asseta, assetb, ledgerentry, lastmodified) "
-            "SELECT * FROM r "
-            "ON CONFLICT (poolasset) DO UPDATE SET "
-            "asseta = excluded.asseta, "
-            "assetb = excluded.assetb, "
-            "ledgerentry = excluded.ledgerentry, "
-            "lastmodified = excluded.lastmodified";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(strPoolAssets));
-        st.exchange(soci::use(strAssetAs));
-        st.exchange(soci::use(strAssetBs));
-        st.exchange(soci::use(strLiquidityPoolEntry));
-        st.exchange(soci::use(strLastModifieds));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getUpsertTimer("liquiditypool");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mPoolAssets.size())
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-#endif
-};
-
-void
-LedgerTxnRoot::Impl::bulkUpsertLiquidityPool(
-    std::vector<EntryIterator> const& entries)
-{
-    BulkUpsertLiquidityPoolOperation op(mApp.getDatabase(), entries);
-    mApp.getDatabase().doDatabaseTypeSpecificOperation(op);
-}
-
-void
-LedgerTxnRoot::Impl::dropLiquidityPools(bool rebuild)
-{
-    throwIfChild();
-    mEntryCache.clear();
-    mBestOffers.clear();
-
-    mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS liquiditypool;";
-
-    if (rebuild)
-    {
-        std::string coll = mApp.getDatabase().getSimpleCollationClause();
-        // The primary key is poolasset (the base-64 opaque TrustLineAsset
-        // containing the PoolID) instead of poolid (the base-64 opaque PoolID)
-        // so that we can perform the join in load pool share trust lines by
-        // account and asset.
-        mApp.getDatabase().getSession()
-            << "CREATE TABLE liquiditypool ("
-            << "poolasset    TEXT " << coll << " PRIMARY KEY, "
-            << "asseta       TEXT " << coll << " NOT NULL, "
-            << "assetb       TEXT " << coll << " NOT NULL, "
-            << "ledgerentry  TEXT NOT NULL, "
-            << "lastmodified INT NOT NULL);";
-        mApp.getDatabase().getSession() << "CREATE INDEX liquiditypoolasseta "
-                                        << "ON liquiditypool(asseta);";
-        mApp.getDatabase().getSession() << "CREATE INDEX liquiditypoolassetb "
-                                        << "ON liquiditypool(assetb);";
-    }
-}
-}
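
The file removed above was the SQL persistence path for liquidity-pool entries; with ledger state served from the bucket list rather than per-type SQL tables, the whole accessor goes away. Its SQLite bulk reads leaned on the `carray()` table-valued function to bind an entire key batch to one prepared statement. A minimal standalone sketch of that pattern against the raw sqlite3 API (table and column names taken from the deleted code; assumes a SQLite build with the carray extension loaded; error handling omitted):

```cpp
#include <sqlite3.h>
#include <string>
#include <vector>

// Bulk point-lookup through carray(): one prepared statement matches an
// arbitrary number of keys, so there is no need to build "IN (?,?,...)"
// placeholder lists per batch.
std::vector<std::string>
bulkSelectLedgerEntries(sqlite3* db, std::vector<std::string> const& keys)
{
    std::vector<char const*> ptrs;
    ptrs.reserve(keys.size());
    for (auto const& k : keys)
    {
        ptrs.push_back(k.c_str());
    }

    sqlite3_stmt* st = nullptr;
    sqlite3_prepare_v2(db,
                       "SELECT ledgerentry FROM liquiditypool "
                       "WHERE poolasset IN carray(?1, ?2, 'char*')",
                       -1, &st, nullptr);
    // Hand SQLite the array itself; the "carray" tag must match the
    // pointer type name the extension expects.
    sqlite3_bind_pointer(st, 1, (void*)ptrs.data(), "carray", nullptr);
    sqlite3_bind_int(st, 2, static_cast<int>(ptrs.size()));

    std::vector<std::string> rows;
    while (sqlite3_step(st) == SQLITE_ROW)
    {
        rows.emplace_back(
            reinterpret_cast<char const*>(sqlite3_column_text(st, 0)));
    }
    sqlite3_finalize(st);
    return rows;
}
```

Binding the array by pointer keeps one cached prepared statement valid for any batch size, which is exactly why the deleted code preferred it over generating placeholder lists.
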
diff --git a/src/ledger/LedgerTxnTTLSQL.cpp b/src/ledger/LedgerTxnTTLSQL.cpp
deleted file mode 100644
index 363923a14d..0000000000
--- a/src/ledger/LedgerTxnTTLSQL.cpp
+++ /dev/null
@@ -1,381 +0,0 @@
-
-// Copyright 2023 Stellar Development Foundation and contributors. Licensed
-// under the Apache License, Version 2.0. See the COPYING file at the root
-// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
-
-#include "ledger/LedgerTxnImpl.h"
-#include "ledger/LedgerTypeUtils.h"
-#include "ledger/NonSociRelatedException.h"
-#include "main/Application.h"
-#include "util/GlobalChecks.h"
-#include "util/types.h"
-
-namespace stellar
-{
-
-static void
-throwIfNotTTL(LedgerEntryType type)
-{
-    if (type != TTL)
-    {
-        throw NonSociRelatedException("LedgerEntry is not TTL");
-    }
-}
-
-std::shared_ptr<LedgerEntry const>
-LedgerTxnRoot::Impl::loadTTL(LedgerKey const& key) const
-{
-    auto keyHash = toOpaqueBase64(key.ttl().keyHash);
-    std::string ttlEntryStr;
-
-    std::string sql = "SELECT ledgerentry "
-                      "FROM ttl "
-                      "WHERE keyhash = :keyHash";
-    auto prep = mApp.getDatabase().getPreparedStatement(sql);
-    auto& st = prep.statement();
-    st.exchange(soci::into(ttlEntryStr));
-    st.exchange(soci::use(keyHash));
-    st.define_and_bind();
-    {
-        auto timer = mApp.getDatabase().getSelectTimer("ttl");
-        st.execute(true);
-    }
-    if (!st.got_data())
-    {
-        return nullptr;
-    }
-
-    LedgerEntry le;
-    fromOpaqueBase64(le, ttlEntryStr);
-    throwIfNotTTL(le.data.type());
-
-    return std::make_shared<LedgerEntry const>(std::move(le));
-}
-class BulkLoadTTLOperation
-    : public DatabaseTypeSpecificOperation<std::vector<LedgerEntry>>
-{
-    Database& mDb;
-    std::vector<std::string> mKeyHashes;
-
-    std::vector<LedgerEntry>
-    executeAndFetch(soci::statement& st)
-    {
-        std::string ttlEntryStr;
-
-        st.exchange(soci::into(ttlEntryStr));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getSelectTimer("ttl");
-            st.execute(true);
-        }
-
-        std::vector<LedgerEntry> res;
-        while (st.got_data())
-        {
-            res.emplace_back();
-            auto& le = res.back();
-
-            fromOpaqueBase64(le, ttlEntryStr);
-            throwIfNotTTL(le.data.type());
-
-            st.fetch();
-        }
-        return res;
-    }
-
-  public:
-    BulkLoadTTLOperation(Database& db, UnorderedSet<LedgerKey> const& keys)
-        : mDb(db)
-    {
-        mKeyHashes.reserve(keys.size());
-        for (auto const& k : keys)
-        {
-            throwIfNotTTL(k.type());
-            mKeyHashes.emplace_back(toOpaqueBase64(k.ttl().keyHash));
-        }
-    }
-
-    std::vector<LedgerEntry>
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        std::vector<char const*> cStrKeyHashes;
-        cStrKeyHashes.reserve(mKeyHashes.size());
-        for (auto const& h : mKeyHashes)
-        {
-            cStrKeyHashes.emplace_back(h.c_str());
-        }
-        std::string sql = "SELECT ledgerentry "
-                          "FROM ttl "
-                          "WHERE keyhash IN carray(?, ?, 'char*')";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        auto be = prep.statement().get_backend();
-        if (be == nullptr)
-        {
-            throw std::runtime_error("no sql backend");
-        }
-        auto sqliteStatement =
-            dynamic_cast<soci::sqlite3_statement_backend*>(be);
-        auto st = sqliteStatement->stmt_;
-
-        sqlite3_reset(st);
-        sqlite3_bind_pointer(st, 1, (void*)cStrKeyHashes.data(), "carray", 0);
-        sqlite3_bind_int(st, 2, static_cast<int>(cStrKeyHashes.size()));
-        return executeAndFetch(prep.statement());
-    }
-
-#ifdef USE_POSTGRES
-    std::vector<LedgerEntry>
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        std::string strKeyHashes;
-        marshalToPGArray(pg->conn_, strKeyHashes, mKeyHashes);
-
-        std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) "
-                          "SELECT ledgerentry "
-                          "FROM ttl "
-                          "WHERE (keyHash) IN (SELECT * from r)";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        auto& st = prep.statement();
-        st.exchange(soci::use(strKeyHashes));
-        return executeAndFetch(st);
-    }
-#endif
-};
-
-UnorderedMap<LedgerKey, std::shared_ptr<LedgerEntry const>>
-LedgerTxnRoot::Impl::bulkLoadTTL(UnorderedSet<LedgerKey> const& keys) const
-{
-    if (!keys.empty())
-    {
-        BulkLoadTTLOperation op(mApp.getDatabase(), keys);
-        return populateLoadedEntries(
-            keys, mApp.getDatabase().doDatabaseTypeSpecificOperation(op));
-    }
-    else
-    {
-        return {};
-    }
-}
-
-class BulkDeleteTTLOperation : public DatabaseTypeSpecificOperation<void>
-{
-    Database& mDb;
-    LedgerTxnConsistency mCons;
-    std::vector<std::string> mKeyHashes;
-
-  public:
-    BulkDeleteTTLOperation(Database& db, LedgerTxnConsistency cons,
-                           std::vector<EntryIterator> const& entries)
-        : mDb(db), mCons(cons)
-    {
-        mKeyHashes.reserve(entries.size());
-        for (auto const& e : entries)
-        {
-            releaseAssertOrThrow(!e.entryExists());
-            throwIfNotTTL(e.key().ledgerKey().type());
-            mKeyHashes.emplace_back(
-                toOpaqueBase64(e.key().ledgerKey().ttl().keyHash));
-        }
-    }
-
-    void
-    doSociGenericOperation()
-    {
-        std::string sql = "DELETE FROM ttl WHERE keyhash = :id";
-        auto prep = mDb.getPreparedStatement(sql);
-        auto& st = prep.statement();
-        st.exchange(soci::use(mKeyHashes));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getDeleteTimer("ttl");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mKeyHashes.size() &&
-            mCons == LedgerTxnConsistency::EXACT)
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-
-    void
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        doSociGenericOperation();
-    }
-
-#ifdef USE_POSTGRES
-    void
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        std::string strKeyHashes;
-        marshalToPGArray(pg->conn_, strKeyHashes, mKeyHashes);
-
-        std::string sql = "WITH r AS (SELECT unnest(:v1::TEXT[])) "
-                          "DELETE FROM ttl "
-                          "WHERE keyHash IN (SELECT * FROM r)";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        auto& st = prep.statement();
-        st.exchange(soci::use(strKeyHashes));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getDeleteTimer("ttl");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mKeyHashes.size() &&
-            mCons == LedgerTxnConsistency::EXACT)
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-#endif
-};
-
-void
-LedgerTxnRoot::Impl::bulkDeleteTTL(std::vector<EntryIterator> const& entries,
-                                   LedgerTxnConsistency cons)
-{
-    BulkDeleteTTLOperation op(mApp.getDatabase(), cons, entries);
-    mApp.getDatabase().doDatabaseTypeSpecificOperation(op);
-}
-
-class BulkUpsertTTLOperation : public DatabaseTypeSpecificOperation<void>
-{
-    Database& mDb;
-    std::vector<std::string> mKeyHashes;
-    std::vector<std::string> mTTLEntries;
-    std::vector<int32_t> mLastModifieds;
-
-    void
-    accumulateEntry(LedgerEntry const& entry)
-    {
-        throwIfNotTTL(entry.data.type());
-
-        mKeyHashes.emplace_back(toOpaqueBase64(entry.data.ttl().keyHash));
-        mTTLEntries.emplace_back(toOpaqueBase64(entry));
-        mLastModifieds.emplace_back(
-            unsignedToSigned(entry.lastModifiedLedgerSeq));
-    }
-
-  public:
-    BulkUpsertTTLOperation(Database& Db,
-                           std::vector<EntryIterator> const& entryIter)
-        : mDb(Db)
-    {
-        for (auto const& e : entryIter)
-        {
-            releaseAssert(e.entryExists());
-            accumulateEntry(e.entry().ledgerEntry());
-        }
-    }
-
-    void
-    doSociGenericOperation()
-    {
-        std::string sql = "INSERT INTO ttl "
-                          "(keyhash, ledgerentry, lastmodified) "
-                          "VALUES "
-                          "( :keyHash, :v1, :v2 ) "
-                          "ON CONFLICT (keyhash) DO UPDATE SET "
-                          "ledgerentry = excluded.ledgerentry, "
-                          "lastmodified = excluded.lastmodified";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(mKeyHashes));
-        st.exchange(soci::use(mTTLEntries));
-        st.exchange(soci::use(mLastModifieds));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getUpsertTimer("ttl");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mKeyHashes.size())
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-
-    void
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        doSociGenericOperation();
-    }
-
-#ifdef USE_POSTGRES
-    void
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        std::string strKeyHashes, strTTLEntries, strLastModifieds;
-
-        PGconn* conn = pg->conn_;
-        marshalToPGArray(conn, strKeyHashes, mKeyHashes);
-        marshalToPGArray(conn, strTTLEntries, mTTLEntries);
-        marshalToPGArray(conn, strLastModifieds, mLastModifieds);
-
-        std::string sql = "WITH r AS "
-                          "(SELECT unnest(:v1::TEXT[]), "
-                          "unnest(:v2::TEXT[]), unnest(:v3::INT[])) "
-                          "INSERT INTO ttl "
-                          "(keyHash, ledgerentry, lastmodified) "
-                          "SELECT * FROM r "
-                          "ON CONFLICT (keyhash) DO UPDATE SET "
-                          "ledgerentry = excluded.ledgerentry, "
-                          "lastmodified = excluded.lastmodified";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(strKeyHashes));
-        st.exchange(soci::use(strTTLEntries));
-        st.exchange(soci::use(strLastModifieds));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getUpsertTimer("ttl");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mKeyHashes.size())
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-#endif
-};
-
-void
-LedgerTxnRoot::Impl::bulkUpsertTTL(std::vector<EntryIterator> const& entries)
-{
-    BulkUpsertTTLOperation op(mApp.getDatabase(), entries);
-    mApp.getDatabase().doDatabaseTypeSpecificOperation(op);
-}
-
-void
-LedgerTxnRoot::Impl::dropTTL(bool rebuild)
-{
-    throwIfChild();
-    mEntryCache.clear();
-    mBestOffers.clear();
-
-    std::string coll = mApp.getDatabase().getSimpleCollationClause();
-
-    mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS ttl;";
-
-    if (rebuild)
-    {
-        mApp.getDatabase().getSession()
-            << "CREATE TABLE ttl ("
-            << "keyhash   TEXT " << coll << " NOT NULL, "
-            << "ledgerentry  TEXT " << coll << " NOT NULL, "
-            << "lastmodified INT NOT NULL, "
-            << "PRIMARY KEY (keyhash));";
-        if (!mApp.getDatabase().isSqlite())
-        {
-            mApp.getDatabase().getSession() << "ALTER TABLE ttl "
-                                            << "ALTER COLUMN keyhash "
-                                            << "TYPE TEXT COLLATE \"C\";";
-        }
-    }
-}
-
-}
\ No newline at end of file
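
The TTL accessor deleted above followed the same template. Its write path is the part worth recording: all three deleted files funnel writes through a vector-bound `INSERT ... ON CONFLICT ... DO UPDATE` and then verify the affected-row count. A sketch of that vectorized upsert in plain SOCI, as a simplified stand-in for stellar-core's `Database` wrapper (assumes SQLite 3.24+ or PostgreSQL for the upsert syntax):

```cpp
#include <soci/soci.h>
#include <stdexcept>
#include <string>
#include <vector>

// Vectorized upsert: binding std::vector arguments makes SOCI run the
// statement as a bulk operation, so one execute() covers the batch.
// Mirrors the shape of the deleted bulkUpsertTTL, not its exact wiring.
void
bulkUpsertTTL(soci::session& sess, std::vector<std::string> const& keyHashes,
              std::vector<std::string> const& entries,
              std::vector<int> const& lastModifieds)
{
    soci::statement st =
        (sess.prepare
             << "INSERT INTO ttl (keyhash, ledgerentry, lastmodified) "
                "VALUES (:k, :e, :m) "
                "ON CONFLICT (keyhash) DO UPDATE SET "
                "ledgerentry = excluded.ledgerentry, "
                "lastmodified = excluded.lastmodified",
         soci::use(keyHashes), soci::use(entries), soci::use(lastModifieds));
    st.execute(true);
    // Same postcondition the deleted code enforced: every row in the
    // batch must have been inserted or updated.
    if (static_cast<size_t>(st.get_affected_rows()) != keyHashes.size())
    {
        throw std::runtime_error("Could not update data in SQL");
    }
}
```
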
diff --git a/src/ledger/LedgerTxnTrustLineSQL.cpp b/src/ledger/LedgerTxnTrustLineSQL.cpp
deleted file mode 100644
index 78631cd25a..0000000000
--- a/src/ledger/LedgerTxnTrustLineSQL.cpp
+++ /dev/null
@@ -1,521 +0,0 @@
-// Copyright 2017 Stellar Development Foundation and contributors. Licensed
-// under the Apache License, Version 2.0. See the COPYING file at the root
-// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
-
-#include "crypto/KeyUtils.h"
-#include "crypto/SecretKey.h"
-#include "database/Database.h"
-#include "database/DatabaseTypeSpecificOperation.h"
-#include "ledger/LedgerTxnImpl.h"
-#include "ledger/LedgerTypeUtils.h"
-#include "ledger/NonSociRelatedException.h"
-#include "main/Application.h"
-#include "util/GlobalChecks.h"
-#include "util/Logging.h"
-#include "util/XDROperators.h"
-#include "util/types.h"
-#include <Tracy.hpp>
-
-namespace stellar
-{
-
-void
-validateTrustLineKey(uint32_t ledgerVersion, LedgerKey const& key)
-{
-    auto const& asset = key.trustLine().asset;
-
-    if (!isAssetValid(asset, ledgerVersion))
-    {
-        throw NonSociRelatedException("TrustLine asset is invalid");
-    }
-    else if (asset.type() == ASSET_TYPE_NATIVE)
-    {
-        throw NonSociRelatedException("XLM TrustLine?");
-    }
-    else if (isIssuer(key.trustLine().accountID, asset))
-    {
-        throw NonSociRelatedException("TrustLine accountID is issuer");
-    }
-}
-
-std::shared_ptr<LedgerEntry const>
-LedgerTxnRoot::Impl::loadTrustLine(LedgerKey const& key) const
-{
-    ZoneScoped;
-
-    validateTrustLineKey(mHeader->ledgerVersion, key);
-
-    std::string accountIDStr = KeyUtils::toStrKey(key.trustLine().accountID);
-    auto asset = toOpaqueBase64(key.trustLine().asset);
-
-    std::string trustLineEntryStr;
-
-    auto prep = mApp.getDatabase().getPreparedStatement(
-        "SELECT ledgerentry "
-        " FROM trustlines "
-        "WHERE accountid= :id AND asset= :asset");
-    auto& st = prep.statement();
-    st.exchange(soci::into(trustLineEntryStr));
-    st.exchange(soci::use(accountIDStr));
-    st.exchange(soci::use(asset));
-    st.define_and_bind();
-    {
-        auto timer = mApp.getDatabase().getSelectTimer("trust");
-        st.execute(true);
-    }
-    if (!st.got_data())
-    {
-        return nullptr;
-    }
-
-    LedgerEntry le;
-    fromOpaqueBase64(le, trustLineEntryStr);
-    if (le.data.type() != TRUSTLINE)
-    {
-        throw NonSociRelatedException("Loaded non-trustline entry");
-    }
-
-    return std::make_shared<LedgerEntry const>(std::move(le));
-}
-
-std::vector<LedgerEntry>
-LedgerTxnRoot::Impl::loadPoolShareTrustLinesByAccountAndAsset(
-    AccountID const& accountID, Asset const& asset) const
-{
-    ZoneScoped;
-
-    std::string accountIDStr = KeyUtils::toStrKey(accountID);
-    auto assetStr = toOpaqueBase64(asset);
-
-    std::string trustLineEntryStr;
-
-    auto prep = mApp.getDatabase().getPreparedStatement(
-        "SELECT trustlines.ledgerentry "
-        "FROM trustlines "
-        "INNER JOIN liquiditypool "
-        "ON trustlines.asset = liquiditypool.poolasset "
-        "AND trustlines.accountid = :v1 "
-        "AND (liquiditypool.asseta = :v2 OR liquiditypool.assetb = :v3)");
-    auto& st = prep.statement();
-    st.exchange(soci::into(trustLineEntryStr));
-    st.exchange(soci::use(accountIDStr));
-    st.exchange(soci::use(assetStr));
-    st.exchange(soci::use(assetStr));
-    st.define_and_bind();
-    {
-        auto timer = mApp.getDatabase().getSelectTimer("trust");
-        st.execute(true);
-    }
-
-    std::vector<LedgerEntry> trustLines;
-    while (st.got_data())
-    {
-        trustLines.emplace_back();
-        fromOpaqueBase64(trustLines.back(), trustLineEntryStr);
-        if (trustLines.back().data.type() != TRUSTLINE)
-        {
-            throw NonSociRelatedException("Loaded non-trustline entry");
-        }
-        st.fetch();
-    }
-    return trustLines;
-}
-
-class BulkUpsertTrustLinesOperation : public DatabaseTypeSpecificOperation<void>
-{
-    Database& mDB;
-    std::vector<std::string> mAccountIDs;
-    std::vector<std::string> mAssets;
-    std::vector<std::string> mTrustLineEntries;
-    std::vector<int32_t> mLastModifieds;
-
-  public:
-    BulkUpsertTrustLinesOperation(Database& DB,
-                                  std::vector<EntryIterator> const& entries,
-                                  uint32_t ledgerVersion)
-        : mDB(DB)
-    {
-        mAccountIDs.reserve(entries.size());
-        mAssets.reserve(entries.size());
-        mTrustLineEntries.reserve(entries.size());
-        mLastModifieds.reserve(entries.size());
-
-        for (auto const& e : entries)
-        {
-            releaseAssert(e.entryExists());
-            releaseAssert(e.entry().type() ==
-                          InternalLedgerEntryType::LEDGER_ENTRY);
-            auto const& le = e.entry().ledgerEntry();
-            releaseAssert(le.data.type() == TRUSTLINE);
-
-            auto const& tl = le.data.trustLine();
-
-            validateTrustLineKey(ledgerVersion, e.key().ledgerKey());
-
-            mAccountIDs.emplace_back(KeyUtils::toStrKey(tl.accountID));
-            mAssets.emplace_back(toOpaqueBase64(tl.asset));
-            mTrustLineEntries.emplace_back(toOpaqueBase64(le));
-            mLastModifieds.emplace_back(
-                unsignedToSigned(le.lastModifiedLedgerSeq));
-        }
-    }
-
-    void
-    doSociGenericOperation()
-    {
-        std::string sql = "INSERT INTO trustlines ( "
-                          "accountid, asset, ledgerentry, lastmodified)"
-                          "VALUES ( "
-                          ":id, :v1, :v2, :v3 "
-                          ") ON CONFLICT (accountid, asset) DO UPDATE SET "
-                          "ledgerentry = excluded.ledgerentry, "
-                          "lastmodified = excluded.lastmodified";
-        auto prep = mDB.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(mAccountIDs));
-        st.exchange(soci::use(mAssets));
-        st.exchange(soci::use(mTrustLineEntries));
-        st.exchange(soci::use(mLastModifieds));
-        st.define_and_bind();
-        {
-            auto timer = mDB.getUpsertTimer("trustline");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mAccountIDs.size())
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-
-    void
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        doSociGenericOperation();
-    }
-
-#ifdef USE_POSTGRES
-    void
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        PGconn* conn = pg->conn_;
-
-        std::string strAccountIDs, strAssets, strTrustLineEntries,
-            strLastModifieds;
-
-        marshalToPGArray(conn, strAccountIDs, mAccountIDs);
-        marshalToPGArray(conn, strAssets, mAssets);
-        marshalToPGArray(conn, strTrustLineEntries, mTrustLineEntries);
-        marshalToPGArray(conn, strLastModifieds, mLastModifieds);
-
-        std::string sql = "WITH r AS (SELECT "
-                          "unnest(:ids::TEXT[]), "
-                          "unnest(:v1::TEXT[]), "
-                          "unnest(:v2::TEXT[]), "
-                          "unnest(:v3::INT[])) "
-                          "INSERT INTO trustlines ( "
-                          "accountid, asset, ledgerEntry, lastmodified"
-                          ") SELECT * from r "
-                          "ON CONFLICT (accountid, asset) DO UPDATE SET "
-                          "ledgerentry = excluded.ledgerentry, "
-                          "lastmodified = excluded.lastmodified";
-        auto prep = mDB.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(strAccountIDs));
-        st.exchange(soci::use(strAssets));
-        st.exchange(soci::use(strTrustLineEntries));
-        st.exchange(soci::use(strLastModifieds));
-        st.define_and_bind();
-        {
-            auto timer = mDB.getUpsertTimer("trustline");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mAccountIDs.size())
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-#endif
-};
-
-class BulkDeleteTrustLinesOperation : public DatabaseTypeSpecificOperation<void>
-{
-    Database& mDB;
-    LedgerTxnConsistency mCons;
-    std::vector<std::string> mAccountIDs;
-    std::vector<std::string> mAssets;
-
-  public:
-    BulkDeleteTrustLinesOperation(Database& DB, LedgerTxnConsistency cons,
-                                  std::vector<EntryIterator> const& entries,
-                                  uint32_t ledgerVersion)
-        : mDB(DB), mCons(cons)
-    {
-        mAccountIDs.reserve(entries.size());
-        mAssets.reserve(entries.size());
-        for (auto const& e : entries)
-        {
-            releaseAssert(!e.entryExists());
-            releaseAssert(e.key().type() ==
-                          InternalLedgerEntryType::LEDGER_ENTRY);
-            releaseAssert(e.key().ledgerKey().type() == TRUSTLINE);
-            auto const& tl = e.key().ledgerKey().trustLine();
-
-            validateTrustLineKey(ledgerVersion, e.key().ledgerKey());
-
-            mAccountIDs.emplace_back(KeyUtils::toStrKey(tl.accountID));
-            mAssets.emplace_back(toOpaqueBase64(tl.asset));
-        }
-    }
-
-    void
-    doSociGenericOperation()
-    {
-        std::string sql = "DELETE FROM trustlines WHERE accountid = :id "
-                          "AND asset = :v1";
-        auto prep = mDB.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(mAccountIDs));
-        st.exchange(soci::use(mAssets));
-        st.define_and_bind();
-        {
-            auto timer = mDB.getDeleteTimer("trustline");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mAccountIDs.size() &&
-            mCons == LedgerTxnConsistency::EXACT)
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-
-    void
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        doSociGenericOperation();
-    }
-
-#ifdef USE_POSTGRES
-    void
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        std::string strAccountIDs, strAssets;
-        PGconn* conn = pg->conn_;
-        marshalToPGArray(conn, strAccountIDs, mAccountIDs);
-        marshalToPGArray(conn, strAssets, mAssets);
-        std::string sql = "WITH r AS (SELECT "
-                          "unnest(:ids::TEXT[]), "
-                          "unnest(:v1::TEXT[])"
-                          ") "
-                          "DELETE FROM trustlines WHERE "
-                          "(accountid, asset) IN (SELECT * FROM r)";
-        auto prep = mDB.getPreparedStatement(sql);
-        soci::statement& st = prep.statement();
-        st.exchange(soci::use(strAccountIDs));
-        st.exchange(soci::use(strAssets));
-        st.define_and_bind();
-        {
-            auto timer = mDB.getDeleteTimer("trustline");
-            st.execute(true);
-        }
-        if (static_cast<size_t>(st.get_affected_rows()) != mAccountIDs.size() &&
-            mCons == LedgerTxnConsistency::EXACT)
-        {
-            throw std::runtime_error("Could not update data in SQL");
-        }
-    }
-#endif
-};
-
-void
-LedgerTxnRoot::Impl::bulkUpsertTrustLines(
-    std::vector<EntryIterator> const& entries)
-{
-    ZoneScoped;
-    ZoneValue(static_cast<int64_t>(entries.size()));
-    BulkUpsertTrustLinesOperation op(mApp.getDatabase(), entries,
-                                     mHeader->ledgerVersion);
-    mApp.getDatabase().doDatabaseTypeSpecificOperation(op);
-}
-
-void
-LedgerTxnRoot::Impl::bulkDeleteTrustLines(
-    std::vector<EntryIterator> const& entries, LedgerTxnConsistency cons)
-{
-    ZoneScoped;
-    ZoneValue(static_cast<int64_t>(entries.size()));
-    BulkDeleteTrustLinesOperation op(mApp.getDatabase(), cons, entries,
-                                     mHeader->ledgerVersion);
-    mApp.getDatabase().doDatabaseTypeSpecificOperation(op);
-}
-
-void
-LedgerTxnRoot::Impl::dropTrustLines(bool rebuild)
-{
-    throwIfChild();
-    mEntryCache.clear();
-    mBestOffers.clear();
-
-    mApp.getDatabase().getSession() << "DROP TABLE IF EXISTS trustlines;";
-
-    if (rebuild)
-    {
-        std::string coll = mApp.getDatabase().getSimpleCollationClause();
-        mApp.getDatabase().getSession()
-            << "CREATE TABLE trustlines"
-            << "("
-            << "accountid    VARCHAR(56) " << coll << " NOT NULL,"
-            << "asset        TEXT " << coll << " NOT NULL,"
-            << "ledgerentry  TEXT NOT NULL,"
-            << "lastmodified INT  NOT NULL,"
-            << "PRIMARY KEY  (accountid, asset));";
-    }
-}
-
-class BulkLoadTrustLinesOperation
-    : public DatabaseTypeSpecificOperation<std::vector<LedgerEntry>>
-{
-    Database& mDb;
-    std::vector<std::string> mAccountIDs;
-    std::vector<std::string> mAssets;
-
-    std::vector<LedgerEntry>
-    executeAndFetch(soci::statement& st)
-    {
-        std::string accountID, asset, trustLineEntryStr;
-
-        st.exchange(soci::into(accountID));
-        st.exchange(soci::into(asset));
-        st.exchange(soci::into(trustLineEntryStr));
-        st.define_and_bind();
-        {
-            auto timer = mDb.getSelectTimer("trust");
-            st.execute(true);
-        }
-
-        std::vector<LedgerEntry> res;
-        while (st.got_data())
-        {
-            res.emplace_back();
-            auto& le = res.back();
-
-            fromOpaqueBase64(le, trustLineEntryStr);
-            releaseAssert(le.data.type() == TRUSTLINE);
-            releaseAssert(le.data.trustLine().asset.type() !=
-                          ASSET_TYPE_NATIVE);
-
-            st.fetch();
-        }
-        return res;
-    }
-
-  public:
-    BulkLoadTrustLinesOperation(Database& db,
-                                UnorderedSet<LedgerKey> const& keys)
-        : mDb(db)
-    {
-        mAccountIDs.reserve(keys.size());
-        mAssets.reserve(keys.size());
-
-        for (auto const& k : keys)
-        {
-            releaseAssert(k.type() == TRUSTLINE);
-            if (k.trustLine().asset.type() == ASSET_TYPE_NATIVE)
-            {
-                throw NonSociRelatedException(
-                    "TrustLine asset can't be native");
-            }
-
-            mAccountIDs.emplace_back(
-                KeyUtils::toStrKey(k.trustLine().accountID));
-            mAssets.emplace_back(toOpaqueBase64(k.trustLine().asset));
-        }
-    }
-
-    virtual std::vector<LedgerEntry>
-    doSqliteSpecificOperation(soci::sqlite3_session_backend* sq) override
-    {
-        releaseAssert(mAccountIDs.size() == mAssets.size());
-
-        std::vector<char const*> cstrAccountIDs;
-        std::vector<char const*> cstrAssets;
-        cstrAccountIDs.reserve(mAccountIDs.size());
-        cstrAssets.reserve(mAssets.size());
-        for (size_t i = 0; i < mAccountIDs.size(); ++i)
-        {
-            cstrAccountIDs.emplace_back(mAccountIDs[i].c_str());
-            cstrAssets.emplace_back(mAssets[i].c_str());
-        }
-
-        std::string sqlJoin = "SELECT x.value, y.value FROM "
-                              "(SELECT rowid, value FROM carray(?, ?, "
-                              "'char*') ORDER BY rowid) "
-                              "AS x "
-                              "INNER JOIN (SELECT rowid, value FROM "
-                              "carray(?, ?, 'char*') ORDER "
-                              "BY rowid) AS y ON x.rowid = y.rowid ";
-        std::string sql = "WITH r AS (" + sqlJoin +
-                          ") SELECT accountid, asset, ledgerentry "
-                          "FROM trustlines WHERE (accountid, asset) IN r";
-
-        auto prep = mDb.getPreparedStatement(sql);
-        auto be = prep.statement().get_backend();
-        if (be == nullptr)
-        {
-            throw std::runtime_error("no sql backend");
-        }
-        auto sqliteStatement =
-            dynamic_cast<soci::sqlite3_statement_backend*>(be);
-        auto st = sqliteStatement->stmt_;
-
-        sqlite3_reset(st);
-        sqlite3_bind_pointer(st, 1, cstrAccountIDs.data(), "carray", 0);
-        sqlite3_bind_int(st, 2, static_cast<int>(cstrAccountIDs.size()));
-        sqlite3_bind_pointer(st, 3, cstrAssets.data(), "carray", 0);
-        sqlite3_bind_int(st, 4, static_cast<int>(cstrAssets.size()));
-        return executeAndFetch(prep.statement());
-    }
-
-#ifdef USE_POSTGRES
-    virtual std::vector<LedgerEntry>
-    doPostgresSpecificOperation(soci::postgresql_session_backend* pg) override
-    {
-        releaseAssert(mAccountIDs.size() == mAssets.size());
-
-        std::string strAccountIDs;
-        std::string strAssets;
-        marshalToPGArray(pg->conn_, strAccountIDs, mAccountIDs);
-        marshalToPGArray(pg->conn_, strAssets, mAssets);
-
-        auto prep = mDb.getPreparedStatement(
-            "WITH r AS (SELECT unnest(:v1::TEXT[]), "
-            "unnest(:v2::TEXT[])) SELECT accountid, asset, "
-            "ledgerentry "
-            " FROM trustlines "
-            "WHERE (accountid, asset) IN (SELECT * "
-            "FROM r)");
-        auto& st = prep.statement();
-        st.exchange(soci::use(strAccountIDs));
-        st.exchange(soci::use(strAssets));
-        return executeAndFetch(st);
-    }
-#endif
-};
-
-UnorderedMap<LedgerKey, std::shared_ptr<LedgerEntry const>>
-LedgerTxnRoot::Impl::bulkLoadTrustLines(
-    UnorderedSet<LedgerKey> const& keys) const
-{
-    ZoneScoped;
-    ZoneValue(static_cast<int64_t>(keys.size()));
-    if (!keys.empty())
-    {
-        BulkLoadTrustLinesOperation op(mApp.getDatabase(), keys);
-        return populateLoadedEntries(
-            keys, mApp.getDatabase().doDatabaseTypeSpecificOperation(op));
-    }
-    else
-    {
-        return {};
-    }
-}
-}
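
Trust lines were the only deleted type keyed by a composite `(accountid, asset)` pair, which is why their SQLite bulk loader is more involved: two `carray()` arrays are zipped on `rowid` to rebuild the key tuples before the `IN` lookup. The statement from the deleted `BulkLoadTrustLinesOperation`, reproduced as a standalone string for reference (row-value `IN` needs SQLite 3.15+; bind the four parameters with `sqlite3_bind_pointer`/`sqlite3_bind_int` as in the earlier sketch):

```cpp
// carray() yields a single value column, so two arrays are aligned on
// rowid to recover (accountid, asset) tuples for the composite-key match.
char const* kPairLookupSql =
    "WITH r AS ("
    "SELECT x.value AS accountid, y.value AS asset "
    "FROM (SELECT rowid, value FROM carray(?1, ?2, 'char*')) AS x "
    "INNER JOIN (SELECT rowid, value FROM carray(?3, ?4, 'char*')) AS y "
    "ON x.rowid = y.rowid) "
    "SELECT accountid, asset, ledgerentry "
    "FROM trustlines WHERE (accountid, asset) IN r";
```

On PostgreSQL each deleted file shipped the same batches as text arrays (`marshalToPGArray`) expanded via `unnest()`, the standard array-binding idiom for that backend.
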
diff --git a/src/ledger/NetworkConfig.cpp b/src/ledger/NetworkConfig.cpp
index e1ae2e43a0..9922343e70 100644
--- a/src/ledger/NetworkConfig.cpp
+++ b/src/ledger/NetworkConfig.cpp
@@ -920,7 +920,7 @@ initialBucketListSizeWindow(Application& app)
     // copies of the current BL size. If the bucketlist is disabled for
     // testing, just fill with ones to avoid triggering asserts.
     auto blSize = app.getConfig().MODE_ENABLES_BUCKETLIST
-                      ? app.getBucketManager().getBucketList().getSize()
+                      ? app.getBucketManager().getLiveBucketList().getSize()
                       : 1;
     for (uint64_t i = 0;
          i < InitialSorobanNetworkConfig::BUCKET_LIST_SIZE_WINDOW_SAMPLE_SIZE;
@@ -1046,7 +1046,7 @@ SorobanNetworkConfig::isValidConfigSettingEntry(ConfigSettingEntry const& cfg,
             cfg.stateArchivalSettings().startingEvictionScanLevel >=
                 MinimumSorobanNetworkConfig::STARTING_EVICTION_LEVEL &&
             cfg.stateArchivalSettings().startingEvictionScanLevel <
-                BucketList::kNumLevels &&
+                LiveBucketList::kNumLevels &&
             cfg.stateArchivalSettings().bucketListWindowSamplePeriod >=
                 MinimumSorobanNetworkConfig::BUCKETLIST_WINDOW_SAMPLE_PERIOD;
 
@@ -1698,7 +1698,7 @@ SorobanNetworkConfig::maybeSnapshotBucketListSize(uint32_t currLedger,
         // Update in memory snapshots
         mBucketListSizeSnapshots.pop_front();
         mBucketListSizeSnapshots.push_back(
-            app.getBucketManager().getBucketList().getSize());
+            app.getBucketManager().getLiveBucketList().getSize());
 
         writeBucketListSizeWindow(ltx);
         updateBucketListSizeAverage();
@@ -1861,13 +1861,12 @@ SorobanNetworkConfig::writeAllSettings(AbstractLedgerTxn& ltx,
 
     // If testing with BucketListDB, we need to commit directly to the
     // BucketList
-    if (app.getConfig().isUsingBucketListDB())
+    if (!app.getConfig().MODE_USES_IN_MEMORY_LEDGER)
     {
         auto lcl = app.getLedgerManager().getLastClosedLedgerHeader();
         lcl.header.ledgerSeq += 1;
-        BucketTestUtils::addBatchAndUpdateSnapshot(
-            app.getBucketManager().getBucketList(), app, lcl.header, {},
-            entries, {});
+        BucketTestUtils::addLiveBatchAndUpdateSnapshot(app, lcl.header, {},
+                                                       entries, {});
     }
 }
 #endif
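
The `NetworkConfig` changes are mechanical renames (`getBucketList()` → `getLiveBucketList()`, `BucketList::kNumLevels` → `LiveBucketList::kNumLevels`) plus the test-only write path keying off `MODE_USES_IN_MEMORY_LEDGER` now that `isUsingBucketListDB()` is gone. The snapshot code they touch maintains a fixed-length sliding window of bucket-list sizes; a self-contained sketch of that scheme (illustrative names, not the actual `NetworkConfig` members; assumes capacity > 0):

```cpp
#include <cstdint>
#include <deque>
#include <numeric>

// Fixed-length sliding window of bucket-list size samples, the scheme
// maybeSnapshotBucketListSize maintains in the hunk above.
class SizeWindow
{
    std::deque<uint64_t> mSamples;

  public:
    SizeWindow(size_t capacity, uint64_t seed) : mSamples(capacity, seed)
    {
    }

    void
    record(uint64_t size)
    {
        mSamples.pop_front();     // evict the oldest sample...
        mSamples.push_back(size); // ...and append the newest
    }

    uint64_t
    average() const
    {
        return std::accumulate(mSamples.begin(), mSamples.end(),
                               uint64_t{0}) /
               mSamples.size();
    }
};
```
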
diff --git a/src/ledger/test/LedgerCloseMetaStreamTests.cpp b/src/ledger/test/LedgerCloseMetaStreamTests.cpp
index efdff716d5..b4c5dbe304 100644
--- a/src/ledger/test/LedgerCloseMetaStreamTests.cpp
+++ b/src/ledger/test/LedgerCloseMetaStreamTests.cpp
@@ -91,9 +91,8 @@ TEST_CASE("LedgerCloseMetaStream file descriptor - LIVE_NODE",
         Config cfg4 = getTestConfig(4);
         Config cfg5 = getTestConfig(
             5,
-            Config::
-                TESTDB_IN_MEMORY_NO_OFFERS); // needed by
-                                             // EXPERIMENTAL_PRECAUTION_DELAY_META
+            Config::TESTDB_IN_MEMORY); // needed by
+                                       // EXPERIMENTAL_PRECAUTION_DELAY_META
 
         // Step 2: open writable files and pass them to configs 4 and 5
         // (watchers).
@@ -240,111 +239,6 @@ TEST_CASE("LedgerCloseMetaStream file descriptor - LIVE_NODE",
             std::vector<LedgerCloseMeta>(lcms.begin(), lcms.end() - 1));
 }
 
-TEST_CASE("LedgerCloseMetaStream file descriptor - REPLAY_IN_MEMORY",
-          "[ledgerclosemetastreamreplay]")
-{
-    // Step 1: generate some history for replay.
-    using namespace stellar::historytestutils;
-    TmpDirHistoryConfigurator tCfg;
-    {
-        Config genCfg = getTestConfig(0, Config::TESTDB_DEFAULT);
-        genCfg.MANUAL_CLOSE = false;
-        VirtualClock genClock;
-        genCfg = tCfg.configure(genCfg, true);
-        auto genApp = createTestApplication(genClock, genCfg);
-        auto& genHam = genApp->getHistoryArchiveManager();
-        genHam.initializeHistoryArchive(tCfg.getArchiveDirName());
-        for (size_t i = 0; i < 100; ++i)
-        {
-            genClock.crank(false);
-        }
-        auto& genHm = genApp->getHistoryManager();
-        while (genHm.getPublishSuccessCount() < 5)
-        {
-            genClock.crank(true);
-        }
-        while (genClock.cancelAllEvents() ||
-               genApp->getProcessManager().getNumRunningProcesses() > 0)
-        {
-            genClock.crank(false);
-        }
-    }
-
-    // Step 2: open a writable file descriptor.
-    TmpDirManager tdm(std::string("streamtmp-") + binToHex(randomBytes(8)));
-    TmpDir td = tdm.tmpDir("streams");
-    std::string metaPath = td.getName() + "/stream.xdr";
-    auto cfg1 = getTestConfig(1);
-#ifdef _WIN32
-    cfg1.METADATA_OUTPUT_STREAM = metaPath;
-#else
-    int fd = ::open(metaPath.c_str(), O_CREAT | O_WRONLY, 0644);
-    REQUIRE(fd != -1);
-    cfg1.METADATA_OUTPUT_STREAM = fmt::format(FMT_STRING("fd:{}"), fd);
-#endif
-
-    bool const delayMeta = GENERATE(true, false);
-
-    // Step 3: pass it to an application and have it catch up to the generated
-    // history, streaming ledgerCloseMeta to the file descriptor.
-    Hash hash;
-    {
-        auto cfg = tCfg.configure(cfg1, false);
-        cfg.NODE_IS_VALIDATOR = false;
-        cfg.FORCE_SCP = false;
-        cfg.RUN_STANDALONE = true;
-        cfg.setInMemoryMode();
-        cfg.EXPERIMENTAL_PRECAUTION_DELAY_META = delayMeta;
-        VirtualClock clock;
-        auto app = createTestApplication(clock, cfg, /*newdb=*/false);
-
-        CatchupConfiguration cc{CatchupConfiguration::CURRENT,
-                                std::numeric_limits<uint32_t>::max(),
-                                CatchupConfiguration::Mode::OFFLINE_COMPLETE};
-        Json::Value catchupInfo;
-        auto& ham = app->getHistoryArchiveManager();
-        auto& lm = app->getLedgerManager();
-        auto archive = ham.selectRandomReadableHistoryArchive();
-        int res = catchup(app, cc, catchupInfo, archive);
-        REQUIRE(res == 0);
-        hash = lm.getLastClosedLedgerHeader().hash;
-        while (clock.cancelAllEvents() ||
-               app->getProcessManager().getNumRunningProcesses() > 0)
-        {
-            clock.crank(false);
-        }
-    }
-
-    // Step 4: reopen the file as an XDR stream and read back the LCMs
-    // and check they have the expected content.
-    //
-    // The EXPERIMENTAL_PRECAUTION_DELAY_META case should still have streamed
-    // the latest meta, because catchup should have validated that ledger's hash
-    // by validating a chain of hashes back from one obtained from consensus.
-    XDRInputFileStream stream;
-    stream.open(metaPath);
-    LedgerCloseMeta lcm;
-    size_t nLcm = 1;
-    while (stream && stream.readOne(lcm))
-    {
-        ++nLcm;
-    }
-    // 5 checkpoints is ledger 0x13f
-    REQUIRE(nLcm == 0x13f);
-    if (lcm.v() == 0)
-    {
-        REQUIRE(lcm.v0().ledgerHeader.hash == hash);
-    }
-    else if (lcm.v() == 1)
-    {
-        REQUIRE(lcm.v1().ledgerHeader.hash == hash);
-    }
-    else
-    {
-        REQUIRE(false);
-    }
-}
-
 TEST_CASE("EXPERIMENTAL_PRECAUTION_DELAY_META configuration",
           "[ledgerclosemetastreamlive][ledgerclosemetastreamreplay]")
 {
@@ -356,49 +250,9 @@ TEST_CASE("EXPERIMENTAL_PRECAUTION_DELAY_META configuration",
     {
         cfg.METADATA_OUTPUT_STREAM = "";
         auto const delayMeta = GENERATE(false, true);
-        auto const inMemory = GENERATE(false, true);
         cfg.EXPERIMENTAL_PRECAUTION_DELAY_META = delayMeta;
-        if (inMemory)
-        {
-            cfg.setInMemoryMode();
-        }
         REQUIRE_NOTHROW(createTestApplication(clock, cfg));
     }
-
-    SECTION("EXPERIMENTAL_PRECAUTION_DELAY_META together with "
-            "METADATA_OUTPUT_STREAM requires --in-memory")
-    {
-        TmpDirManager tdm(std::string("streamtmp-") + binToHex(randomBytes(8)));
-        TmpDir td = tdm.tmpDir("streams");
-        std::string metaPath = td.getName() + "/stream.xdr";
-        std::string metaStream;
-
-#ifdef _WIN32
-        metaStream = metaPath;
-#else
-        int fd = ::open(metaPath.c_str(), O_CREAT | O_WRONLY, 0644);
-        REQUIRE(fd != -1);
-        metaStream = fmt::format(FMT_STRING("fd:{}"), fd);
-#endif
-
-        cfg.METADATA_OUTPUT_STREAM = metaStream;
-        auto const delayMeta = GENERATE(false, true);
-        auto const inMemory = GENERATE(false, true);
-        cfg.EXPERIMENTAL_PRECAUTION_DELAY_META = delayMeta;
-        if (inMemory)
-        {
-            cfg.setInMemoryMode();
-        }
-        if (delayMeta && !inMemory)
-        {
-            REQUIRE_THROWS_AS(createTestApplication(clock, cfg),
-                              std::invalid_argument);
-        }
-        else
-        {
-            REQUIRE_NOTHROW(createTestApplication(clock, cfg));
-        }
-    }
 }
 
 TEST_CASE("METADATA_DEBUG_LEDGERS works", "[metadebug]")
diff --git a/src/ledger/test/LedgerTestUtils.cpp b/src/ledger/test/LedgerTestUtils.cpp
index 6835e3445f..fec76658ce 100644
--- a/src/ledger/test/LedgerTestUtils.cpp
+++ b/src/ledger/test/LedgerTestUtils.cpp
@@ -15,6 +15,7 @@
 #include "util/types.h"
 #include "xdr/Stellar-contract.h"
 #include "xdr/Stellar-ledger-entries.h"
+#include "xdr/Stellar-types.h"
 #include <autocheck/generator.hpp>
 #include <locale>
 #include <string>
@@ -741,9 +742,40 @@ generateValidLedgerEntryWithTypes(
     }
 }
 
+std::vector<LedgerKey>
+generateValidUniqueLedgerKeysWithTypes(
+    std::unordered_set<LedgerEntryType> const& types, size_t n,
+    UnorderedSet<LedgerKey>& seenKeys)
+{
+    std::vector<LedgerKey> res;
+    res.reserve(n);
+    while (res.size() < n)
+    {
+        auto entry = generateValidLedgerEntryWithTypes(types);
+        auto key = LedgerEntryKey(entry);
+        if (seenKeys.find(key) != seenKeys.end())
+        {
+            continue;
+        }
+
+        seenKeys.insert(key);
+        res.emplace_back(key);
+    }
+    return res;
+}
+
 std::vector<LedgerEntry>
 generateValidUniqueLedgerEntriesWithTypes(
     std::unordered_set<LedgerEntryType> const& types, size_t n)
+{
+    UnorderedSet<LedgerKey> seenKeys;
+    return generateValidUniqueLedgerEntriesWithTypes(types, n, seenKeys);
+}
+
+std::vector<LedgerEntry>
+generateValidUniqueLedgerEntriesWithTypes(
+    std::unordered_set<LedgerEntryType> const& types, size_t n,
+    UnorderedSet<LedgerKey>& seenKeys)
 {
     UnorderedSet<LedgerKey> keys;
     std::vector<LedgerEntry> entries;
@@ -753,6 +786,12 @@ generateValidUniqueLedgerEntriesWithTypes(
     {
         auto entry = generateValidLedgerEntryWithTypes(types);
         auto key = LedgerEntryKey(entry);
+        auto [_, inserted] = seenKeys.insert(key);
+        if (!inserted)
+        {
+            continue;
+        }
+
         if (keys.find(key) != keys.end())
         {
             continue;
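
The new `seenKeys` parameter lets callers thread one deduplication set through several generator calls, so keys and entries drawn in sequence can never collide. A hypothetical usage sketch combining the two overloads added here (fragment only; types and helpers as declared in `LedgerTestUtils.h`):

```cpp
// Hypothetical test setup: one shared set guarantees global uniqueness
// across separately generated keys ("dead" candidates) and entries.
UnorderedSet<LedgerKey> seenKeys;
auto deadKeys = LedgerTestUtils::generateValidUniqueLedgerKeysWithTypes(
    {CONTRACT_DATA, CONTRACT_CODE}, 10, seenKeys);
auto liveEntries = LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes(
    {CONTRACT_DATA, CONTRACT_CODE}, 10, seenKeys); // skips keys already seen
```
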
diff --git a/src/ledger/test/LedgerTestUtils.h b/src/ledger/test/LedgerTestUtils.h
index 27277b0ac3..889cb9752d 100644
--- a/src/ledger/test/LedgerTestUtils.h
+++ b/src/ledger/test/LedgerTestUtils.h
@@ -6,6 +6,8 @@
 
 #include "history/HistoryManager.h"
 #include "overlay/StellarXDR.h"
+#include "util/UnorderedSet.h"
+#include "util/types.h"
 
 namespace stellar
 {
@@ -45,6 +47,10 @@ std::vector<LedgerEntry> generateValidUniqueLedgerEntries(size_t n);
 std::vector<LedgerKey> generateValidLedgerEntryKeysWithExclusions(
     std::unordered_set<LedgerEntryType> const& excludedTypes, size_t n);
 
+std::vector<LedgerKey> generateValidUniqueLedgerKeysWithTypes(
+    std::unordered_set<LedgerEntryType> const& types, size_t n,
+    UnorderedSet<LedgerKey>& seenKeys);
+
 std::vector<LedgerKey> generateUniqueValidSorobanLedgerEntryKeys(size_t n);
 
 std::vector<LedgerKey> generateValidUniqueLedgerEntryKeysWithExclusions(
@@ -62,6 +68,9 @@ LedgerEntry generateValidLedgerEntryWithTypes(
     std::unordered_set<LedgerEntryType> const& types, size_t b = 3);
 std::vector<LedgerEntry> generateValidUniqueLedgerEntriesWithTypes(
     std::unordered_set<LedgerEntryType> const& types, size_t n);
+std::vector<LedgerEntry> generateValidUniqueLedgerEntriesWithTypes(
+    std::unordered_set<LedgerEntryType> const& types, size_t n,
+    UnorderedSet<LedgerKey>& seenKeys);
 
 AccountEntry generateValidAccountEntry(size_t b = 3);
 std::vector<AccountEntry> generateValidAccountEntries(size_t n);
diff --git a/src/ledger/test/LedgerTxnTests.cpp b/src/ledger/test/LedgerTxnTests.cpp
index ba3d2a698c..11f0a2c9fd 100644
--- a/src/ledger/test/LedgerTxnTests.cpp
+++ b/src/ledger/test/LedgerTxnTests.cpp
@@ -339,13 +339,18 @@ TEST_CASE("LedgerTxn round trip", "[ledgertxn]")
     std::bernoulli_distribution shouldCommitDist;
 
     auto generateNew = [](AbstractLedgerTxn& ltx,
-                          UnorderedMap<LedgerKey, LedgerEntry>& entries) {
+                          UnorderedMap<LedgerKey, LedgerEntry>& entries,
+                          bool offerOnly) {
         size_t const NEW_ENTRIES = 100;
         UnorderedMap<LedgerKey, LedgerEntry> newBatch;
         while (newBatch.size() < NEW_ENTRIES)
         {
-            auto le = LedgerTestUtils::generateValidLedgerEntryWithExclusions(
-                {CONFIG_SETTING});
+            auto le =
+                offerOnly
+                    ? LedgerTestUtils::generateValidLedgerEntryOfType(OFFER)
+                    : LedgerTestUtils::generateValidLedgerEntryWithExclusions(
+                          {CONFIG_SETTING});
+
             auto key = LedgerEntryKey(le);
             if (entries.find(LedgerEntryKey(le)) == entries.end())
             {
@@ -428,7 +433,7 @@ TEST_CASE("LedgerTxn round trip", "[ledgertxn]")
         }
     };
 
-    auto runTest = [&](AbstractLedgerTxnParent& ltxParent) {
+    auto runTest = [&](AbstractLedgerTxnParent& ltxParent, bool offerOnly) {
         UnorderedMap<LedgerKey, LedgerEntry> entries;
         UnorderedSet<LedgerKey> dead;
         size_t const NUM_BATCHES = 10;
@@ -439,7 +444,7 @@ TEST_CASE("LedgerTxn round trip", "[ledgertxn]")
             UnorderedMap<LedgerKey, LedgerEntry> updatedEntries = entries;
             UnorderedSet<LedgerKey> updatedDead = dead;
             LedgerTxn ltx1(ltxParent);
-            generateNew(ltx1, updatedEntries);
+            generateNew(ltx1, updatedEntries, offerOnly);
             generateModify(ltx1, updatedEntries);
             generateErase(ltx1, updatedEntries, updatedDead);
 
@@ -459,7 +464,7 @@ TEST_CASE("LedgerTxn round trip", "[ledgertxn]")
             auto app = createTestApplication(clock, getTestConfig(0, mode));
 
             LedgerTxn ltx1(app->getLedgerTxnRoot());
-            runTest(ltx1);
+            runTest(ltx1, false);
         }
 
         SECTION("round trip to LedgerTxnRoot")
@@ -468,13 +473,9 @@ TEST_CASE("LedgerTxn round trip", "[ledgertxn]")
             {
                 VirtualClock clock;
                 // BucketListDB incompatible with direct root commits
-                auto app = createTestApplication(
-                    clock,
-                    getTestConfig(0, mode == Config::TESTDB_DEFAULT
-                                         ? Config::TESTDB_IN_MEMORY_NO_OFFERS
-                                         : mode));
+                auto app = createTestApplication(clock, getTestConfig(0, mode));
 
-                runTest(app->getLedgerTxnRoot());
+                runTest(app->getLedgerTxnRoot(), true);
             }
 
             SECTION("with no cache")
@@ -482,31 +483,23 @@ TEST_CASE("LedgerTxn round trip", "[ledgertxn]")
                 VirtualClock clock;
 
                 // BucketListDB incompatible with direct root commits
-                auto cfg =
-                    getTestConfig(0, mode == Config::TESTDB_DEFAULT
-                                         ? Config::TESTDB_IN_MEMORY_NO_OFFERS
-                                         : mode);
+                auto cfg = getTestConfig(0, mode);
                 cfg.ENTRY_CACHE_SIZE = 0;
                 auto app = createTestApplication(clock, cfg);
 
-                runTest(app->getLedgerTxnRoot());
+                runTest(app->getLedgerTxnRoot(), true);
             }
         }
     };
 
-    SECTION("default")
-    {
-        runTestWithDbMode(Config::TESTDB_DEFAULT);
-    }
-
-    SECTION("sqlite")
+    SECTION("bucketlist")
     {
-        runTestWithDbMode(Config::TESTDB_ON_DISK_SQLITE);
+        runTestWithDbMode(Config::TESTDB_BUCKET_DB_PERSISTENT);
     }
 
     SECTION("in-memory")
     {
-        runTestWithDbMode(Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        runTestWithDbMode(Config::TESTDB_IN_MEMORY);
     }
 
 #ifdef USE_POSTGRES
@@ -713,19 +706,14 @@ TEST_CASE("LedgerTxn createWithoutLoading and updateWithoutLoading",
         }
     };
 
-    SECTION("default")
+    SECTION("bucketlist")
     {
-        runTest(Config::TESTDB_DEFAULT);
-    }
-
-    SECTION("sqlite")
-    {
-        runTest(Config::TESTDB_ON_DISK_SQLITE);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT);
     }
 
     SECTION("in-memory")
     {
-        runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        runTest(Config::TESTDB_IN_MEMORY);
     }
 
 #ifdef USE_POSTGRES
@@ -813,19 +801,14 @@ TEST_CASE("LedgerTxn erase", "[ledgertxn]")
             validate(ltx3, {});
         }
     };
-    SECTION("default")
+    SECTION("bucketlist")
     {
-        runTest(Config::TESTDB_DEFAULT);
-    }
-
-    SECTION("sqlite")
-    {
-        runTest(Config::TESTDB_ON_DISK_SQLITE);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT);
     }
 
     SECTION("in-memory")
     {
-        runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        runTest(Config::TESTDB_IN_MEMORY);
     }
 
 #ifdef USE_POSTGRES
@@ -918,19 +901,14 @@ TEST_CASE("LedgerTxn eraseWithoutLoading", "[ledgertxn]")
         }
     };
 
-    SECTION("default")
-    {
-        runTest(Config::TESTDB_DEFAULT);
-    }
-
-    SECTION("sqlite")
+    SECTION("bucketlist")
     {
-        runTest(Config::TESTDB_ON_DISK_SQLITE);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT);
     }
 
     SECTION("in-memory")
     {
-        runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        runTest(Config::TESTDB_IN_MEMORY);
     }
 
 #ifdef USE_POSTGRES
@@ -1035,7 +1013,7 @@ testInflationWinners(
     {
         VirtualClock clock;
         auto app = createTestApplication(
-            clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+            clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
         testAtRoot(*app);
     }
@@ -1044,7 +1022,7 @@ testInflationWinners(
     if (updates.size() > 1)
     {
         VirtualClock clock;
-        auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
         cfg.ENTRY_CACHE_SIZE = 0;
         auto app = createTestApplication(clock, cfg);
 
@@ -1055,7 +1033,7 @@ testInflationWinners(
     {
         VirtualClock clock;
         auto app = createTestApplication(
-            clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+            clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
         testInflationWinners(app->getLedgerTxnRoot(), maxWinners, minBalance,
                              expected, updates.cbegin(), updates.cend());
@@ -1384,19 +1362,14 @@ TEST_CASE("LedgerTxn loadHeader", "[ledgertxn]")
         }
     };
 
-    SECTION("default")
-    {
-        runTest(Config::TESTDB_DEFAULT);
-    }
-
-    SECTION("sqlite")
+    SECTION("bucketlist")
     {
-        runTest(Config::TESTDB_ON_DISK_SQLITE);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT);
     }
 
     SECTION("in-memory")
     {
-        runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        runTest(Config::TESTDB_IN_MEMORY);
     }
 
 #ifdef USE_POSTGRES
@@ -1494,103 +1467,16 @@ TEST_CASE_VERSIONS("LedgerTxn load", "[ledgertxn]")
                 }
             });
         }
-
-        SECTION("load tests for all versions")
-        {
-            for_all_versions(*app, [&]() {
-                SECTION("invalid keys")
-                {
-                    LedgerTxn ltx1(app->getLedgerTxnRoot());
-
-                    auto acc = txtest::getAccount("acc");
-                    auto acc2 = txtest::getAccount("acc2");
-
-                    {
-                        auto native = txtest::makeNativeAsset();
-                        UNSCOPED_INFO("native asset on trustline key");
-
-                        // Invariant not supported in BucketListDB and in-memory
-                        // mode
-                        if (mode != Config::TESTDB_DEFAULT &&
-                            mode != Config::TESTDB_IN_MEMORY_NO_OFFERS)
-                        {
-                            REQUIRE_THROWS_AS(ltx1.load(trustlineKey(
-                                                  acc.getPublicKey(), native)),
-                                              NonSociRelatedException);
-                        }
-                    }
-
-                    {
-                        auto usd = txtest::makeAsset(acc, "usd");
-                        UNSCOPED_INFO("issuer on trustline key");
-
-                        // Invariant not supported in BucketListDB and in-memory
-                        // mode
-                        if (mode != Config::TESTDB_DEFAULT &&
-                            mode != Config::TESTDB_IN_MEMORY_NO_OFFERS)
-                        {
-                            REQUIRE_THROWS_AS(ltx1.load(trustlineKey(
-                                                  acc.getPublicKey(), usd)),
-                                              NonSociRelatedException);
-                        }
-                    }
-
-                    {
-                        std::string accountIDStr, issuerStr, assetCodeStr;
-                        auto invalidAssets = testutil::getInvalidAssets(acc);
-                        for (auto const& asset : invalidAssets)
-                        {
-                            auto key = trustlineKey(acc2.getPublicKey(), asset);
-
-                            // Invariant not supported in BucketListDB and
-                            // in-memory mode
-                            if (mode != Config::TESTDB_DEFAULT &&
-                                mode != Config::TESTDB_IN_MEMORY_NO_OFFERS)
-                            {
-                                REQUIRE_THROWS_AS(ltx1.load(key),
-                                                  NonSociRelatedException);
-                            }
-                        }
-                    }
-
-                    SECTION("load generated keys")
-                    {
-                        for (int i = 0; i < 1000; ++i)
-                        {
-                            LedgerKey lk = autocheck::generator<LedgerKey>()(5);
-
-                            try
-                            {
-                                ltx1.load(lk);
-                            }
-                            catch (NonSociRelatedException&)
-                            {
-                                // this is fine
-                            }
-                            catch (std::exception&)
-                            {
-                                REQUIRE(false);
-                            }
-                        }
-                    }
-                }
-            });
-        }
     };
 
-    SECTION("default")
+    SECTION("bucketlist")
     {
-        runTest(Config::TESTDB_DEFAULT);
-    }
-
-    SECTION("sqlite")
-    {
-        runTest(Config::TESTDB_ON_DISK_SQLITE);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT);
     }
 
     SECTION("in-memory")
     {
-        runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        runTest(Config::TESTDB_IN_MEMORY);
     }
 
 #ifdef USE_POSTGRES
@@ -1933,19 +1819,14 @@ TEST_CASE("LedgerTxn loadAllOffers", "[ledgertxn]")
         }
     };
 
-    SECTION("default")
-    {
-        runTest(Config::TESTDB_DEFAULT);
-    }
-
-    SECTION("sqlite")
+    SECTION("bucketlist")
     {
-        runTest(Config::TESTDB_ON_DISK_SQLITE);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT);
     }
 
     SECTION("in-memory")
     {
-        runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        runTest(Config::TESTDB_IN_MEMORY);
     }
 
 #ifdef USE_POSTGRES
@@ -2334,14 +2215,19 @@ TEST_CASE("LedgerTxn loadBestOffer", "[ledgertxn]")
                     loadAccount(ltx2, account.accountID);
                 }
 
-                // Note that we can't prefetch for more than 1000 offers
-                double expectedPrefetchHitRate =
-                    std::min(numOffers - offerID,
-                             static_cast<int64_t>(getMaxOffersToCross())) /
-                    static_cast<double>(accounts.size());
-                REQUIRE(fabs(expectedPrefetchHitRate -
-                             ltx2.getPrefetchHitRate()) < .000001);
-                REQUIRE(preLoadPrefetchHitRate < ltx2.getPrefetchHitRate());
+                // Prefetch is a no-op in in-memory mode; that mode is
+                // testing-only, so we skip the hit-rate assertions and
+                // only check correctness
+                if (mode != Config::TESTDB_IN_MEMORY)
+                {
+                    // Note that we can't prefetch for more than 1000 offers
+                    double expectedPrefetchHitRate =
+                        std::min(numOffers - offerID,
+                                 static_cast<int64_t>(getMaxOffersToCross())) /
+                        static_cast<double>(accounts.size());
+                    REQUIRE(fabs(expectedPrefetchHitRate -
+                                 ltx2.getPrefetchHitRate()) < .000001);
+                    REQUIRE(preLoadPrefetchHitRate < ltx2.getPrefetchHitRate());
+                }
             };
 
             SECTION("prefetch for all worse remaining offers")
@@ -2362,14 +2248,16 @@ TEST_CASE("LedgerTxn loadBestOffer", "[ledgertxn]")
         }
     };
 
-    SECTION("default")
+    SECTION("bucketlist")
     {
-        runTest(Config::TESTDB_DEFAULT);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT);
     }
 
-    SECTION("sqlite")
+    // In-memory mode is testing-only, but we should still make sure it works
+    // for the other tests that rely on it
+    SECTION("in-memory")
     {
-        runTest(Config::TESTDB_ON_DISK_SQLITE);
+        runTest(Config::TESTDB_IN_MEMORY);
     }
 
 #ifdef USE_POSTGRES
@@ -2738,7 +2626,7 @@ TEST_CASE("LedgerTxnRoot prefetch classic entries", "[ledgertxn]")
             e.lastModifiedLedgerSeq = 1;
             entrySet.emplace(e);
         }
-        if (cfg.isUsingBucketListDB())
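+        // Every mode except the test-only in-memory one is backed by
+        // BucketListDB, so the generated entries must also be written to
+        // the live BucketList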
+        if (!cfg.MODE_USES_IN_MEMORY_LEDGER)
         {
             std::vector<LedgerEntry> ledgerVect{entrySet.begin(),
                                                 entrySet.end()};
@@ -2747,9 +2635,8 @@ TEST_CASE("LedgerTxnRoot prefetch classic entries", "[ledgertxn]")
                                    .getLastClosedLedgerHeader()
                                    .header.ledgerVersion;
             lh.ledgerSeq = 2;
-            BucketTestUtils::addBatchAndUpdateSnapshot(
-                app->getBucketManager().getBucketList(), *app, lh, {},
-                ledgerVect, {});
+            BucketTestUtils::addLiveBatchAndUpdateSnapshot(*app, lh, {},
+                                                           ledgerVect, {});
         }
         ltx.commit();
 
@@ -2790,14 +2677,9 @@ TEST_CASE("LedgerTxnRoot prefetch classic entries", "[ledgertxn]")
         }
     };
 
-    SECTION("default")
-    {
-        runTest(getTestConfig());
-    }
-
-    SECTION("sqlite")
+    SECTION("bucketlist")
     {
-        runTest(getTestConfig(0, Config::TESTDB_ON_DISK_SQLITE));
+        runTest(getTestConfig(Config::TESTDB_BUCKET_DB_PERSISTENT));
     }
 
 #ifdef USE_POSTGRES
@@ -2822,7 +2704,9 @@ TEST_CASE("Create performance benchmark", "[!hide][createbench]")
         {
             // First add some bulk filler entries so we're not using a
             // totally empty database.
-            entries = LedgerTestUtils::generateValidLedgerEntries(n);
+            entries =
+                LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes(
+                    {OFFER}, n);
             LedgerTxn ltx(app->getLedgerTxnRoot());
             for (auto e : entries)
             {
@@ -2832,7 +2716,8 @@ TEST_CASE("Create performance benchmark", "[!hide][createbench]")
         }
 
         // Then do some precise timed creates.
-        entries = LedgerTestUtils::generateValidLedgerEntries(n);
+        entries = LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes(
+            {OFFER}, n);
         auto& m =
             app->getMetrics().NewMeter({"ledger", "create", "commit"}, "entry");
         while (!entries.empty())
@@ -2859,8 +2744,8 @@ TEST_CASE("Create performance benchmark", "[!hide][createbench]")
 
     SECTION("sqlite")
     {
-        runTest(Config::TESTDB_ON_DISK_SQLITE, true);
-        runTest(Config::TESTDB_ON_DISK_SQLITE, false);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT, true);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT, false);
     }
 
 #ifdef USE_POSTGRES
@@ -2886,7 +2771,9 @@ TEST_CASE("Erase performance benchmark", "[!hide][erasebench]")
         {
             // First add some bulk filler entries so we're not using a
             // totally empty database.
-            entries = LedgerTestUtils::generateValidLedgerEntries(n);
+            entries =
+                LedgerTestUtils::generateValidUniqueLedgerEntriesWithTypes(
+                    {OFFER}, n);
             LedgerTxn ltx(app->getLedgerTxnRoot());
             for (auto e : entries)
             {
@@ -2922,8 +2809,8 @@ TEST_CASE("Erase performance benchmark", "[!hide][erasebench]")
 
     SECTION("sqlite")
     {
-        runTest(Config::TESTDB_ON_DISK_SQLITE, true);
-        runTest(Config::TESTDB_ON_DISK_SQLITE, false);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT, true);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT, false);
     }
 
 #ifdef USE_POSTGRES
@@ -2942,7 +2829,6 @@ TEST_CASE("LedgerTxnRoot prefetch soroban entries", "[ledgertxn]")
 
     // Test setup.
     VirtualClock clock;
-    cfg.DEPRECATED_SQL_LEDGER_STATE = false;
     Application::pointer app = createTestApplication(clock, cfg);
     UnorderedSet<LedgerKey> keysToPrefetch;
     auto& root = app->getLedgerTxnRoot();
@@ -2980,9 +2866,8 @@ TEST_CASE("LedgerTxnRoot prefetch soroban entries", "[ledgertxn]")
                            .getLastClosedLedgerHeader()
                            .header.ledgerVersion;
     lh.ledgerSeq = 2;
-    BucketTestUtils::addBatchAndUpdateSnapshot(
-        app->getBucketManager().getBucketList(), *app, lh, {}, ledgerVect,
-        deadKeyVect);
+    BucketTestUtils::addLiveBatchAndUpdateSnapshot(*app, lh, {}, ledgerVect,
+                                                   deadKeyVect);
     ltx.commit();
 
     auto addTxn = [&](bool enoughQuota, std::vector<LedgerEntry> entries,
@@ -3150,219 +3035,6 @@ TEST_CASE("LedgerKeyMeter tests")
     REQUIRE(lkMeter.canLoad(ttlKey, std::numeric_limits<std::uint32_t>::max()));
 }
 
-TEST_CASE("Bulk load batch size benchmark", "[!hide][bulkbatchsizebench]")
-{
-    size_t floor = 1000;
-    size_t ceiling = 20000;
-    size_t bestBatchSize = 0;
-    double bestTime = 0xffffffff;
-
-    auto runTest = [&](Config::TestDbMode mode) {
-        for (; floor <= ceiling; floor += 1000)
-        {
-            UnorderedSet<LedgerKey> keys;
-            VirtualClock clock;
-            Config cfg(getTestConfig(0, mode));
-            cfg.PREFETCH_BATCH_SIZE = floor;
-
-            auto app = createTestApplication(clock, cfg);
-
-            auto& root = app->getLedgerTxnRoot();
-
-            auto entries = LedgerTestUtils::generateValidLedgerEntries(50000);
-            LedgerTxn ltx(root);
-            for (auto e : entries)
-            {
-                ltx.createWithoutLoading(e);
-                keys.insert(LedgerEntryKey(e));
-            }
-            ltx.commit();
-
-            auto& m = app->getMetrics().NewTimer(
-                {"ledger", "bulk-load", std::to_string(floor) + " batch"});
-            LedgerTxn ltx2(root);
-            {
-                m.TimeScope();
-                root.prefetchClassic(keys);
-            }
-            ltx2.commit();
-
-            auto total = m.sum();
-            CLOG_INFO(Ledger, "Bulk Load test batch size: {} took {}", floor,
-                      total);
-
-            if (total < bestTime)
-            {
-                bestBatchSize = floor;
-                bestTime = total;
-            }
-        }
-        CLOG_INFO(Ledger, "Best batch and best time per entry {} : {}",
-                  bestBatchSize, bestTime);
-    };
-
-    SECTION("sqlite")
-    {
-        runTest(Config::TESTDB_ON_DISK_SQLITE);
-    }
-
-#ifdef USE_POSTGRES
-    SECTION("postgresql")
-    {
-        runTest(Config::TESTDB_POSTGRESQL);
-    }
-#endif
-}
-
-TEST_CASE("Signers performance benchmark", "[!hide][signersbench]")
-{
-    auto getTimeScope = [](Application& app, uint32_t numSigners,
-                           std::string const& phase) {
-        std::string benchmarkStr = "benchmark-" + std::to_string(numSigners);
-        return app.getMetrics()
-            .NewTimer({"signers", benchmarkStr, phase})
-            .TimeScope();
-    };
-
-    auto getTimeSpent = [](Application& app, uint32_t numSigners,
-                           std::string const& phase) {
-        std::string benchmarkStr = "benchmark-" + std::to_string(numSigners);
-        auto time =
-            app.getMetrics().NewTimer({"signers", benchmarkStr, phase}).sum();
-        return phase + ": " + std::to_string(time) + " ms";
-    };
-
-    auto generateEntries = [](size_t numAccounts, uint32_t numSigners) {
-        std::vector<LedgerEntry> accounts;
-        accounts.reserve(numAccounts);
-        for (size_t i = 0; i < numAccounts; ++i)
-        {
-            LedgerEntry le;
-            le.data.type(ACCOUNT);
-            le.lastModifiedLedgerSeq = 2;
-            le.data.account() = LedgerTestUtils::generateValidAccountEntry();
-
-            auto& signers = le.data.account().signers;
-            if (signers.size() > numSigners)
-            {
-                signers.resize(numSigners);
-            }
-            else if (signers.size() < numSigners)
-            {
-                signers.reserve(numSigners);
-                std::generate_n(std::back_inserter(signers),
-                                numSigners - signers.size(),
-                                std::bind(autocheck::generator<Signer>(), 5));
-                std::sort(signers.begin(), signers.end(),
-                          [](Signer const& lhs, Signer const& rhs) {
-                              return lhs.key < rhs.key;
-                          });
-            }
-
-            accounts.emplace_back(le);
-        }
-        return accounts;
-    };
-
-    auto generateKeys = [](std::vector<LedgerEntry> const& accounts) {
-        std::vector<LedgerKey> keys;
-        keys.reserve(accounts.size());
-        std::transform(
-            accounts.begin(), accounts.end(), std::back_inserter(keys),
-            [](LedgerEntry const& le) { return LedgerEntryKey(le); });
-        return keys;
-    };
-
-    auto writeEntries =
-        [&getTimeScope](Application& app, uint32_t numSigners,
-                        std::vector<LedgerEntry> const& accounts) {
-            CLOG_WARNING(Ledger, "Creating accounts");
-            LedgerTxn ltx(app.getLedgerTxnRoot());
-            {
-                auto timer = getTimeScope(app, numSigners, "create");
-                for (auto const& le : accounts)
-                {
-                    ltx.create(le);
-                }
-            }
-
-            CLOG_WARNING(Ledger, "Writing accounts");
-            {
-                auto timer = getTimeScope(app, numSigners, "write");
-                ltx.commit();
-            }
-        };
-
-    auto readEntriesAndUpdateLastModified =
-        [&getTimeScope](Application& app, uint32_t numSigners,
-                        std::vector<LedgerKey> const& accounts) {
-            CLOG_WARNING(Ledger, "Reading accounts");
-            LedgerTxn ltx(app.getLedgerTxnRoot());
-            {
-                auto timer = getTimeScope(app, numSigners, "read");
-                for (auto const& key : accounts)
-                {
-                    ++ltx.load(key).current().lastModifiedLedgerSeq;
-                }
-            }
-
-            CLOG_WARNING(Ledger, "Writing accounts with unchanged signers");
-            {
-                auto timer = getTimeScope(app, numSigners, "rewrite");
-                ltx.commit();
-            }
-        };
-
-    auto runTest = [&](Config::TestDbMode mode, size_t numAccounts,
-                       uint32_t numSigners) {
-        VirtualClock clock;
-        Config cfg(getTestConfig(0, mode));
-        cfg.ENTRY_CACHE_SIZE = 0;
-        Application::pointer app = createTestApplication(clock, cfg);
-
-        CLOG_WARNING(Ledger, "Generating {} accounts with {} signers each",
-                     numAccounts, numSigners);
-        auto accounts = generateEntries(numAccounts, numSigners);
-        auto keys = generateKeys(accounts);
-
-        writeEntries(*app, numSigners, accounts);
-        readEntriesAndUpdateLastModified(*app, numSigners, keys);
-
-        CLOG_WARNING(Ledger, "Done ({}, {}, {}, {})",
-                     getTimeSpent(*app, numSigners, "create"),
-                     getTimeSpent(*app, numSigners, "write"),
-                     getTimeSpent(*app, numSigners, "read"),
-                     getTimeSpent(*app, numSigners, "rewrite"));
-    };
-
-    auto runTests = [&](Config::TestDbMode mode) {
-        SECTION("0 signers")
-        {
-            runTest(mode, 100000, 0);
-        }
-        SECTION("10 signers")
-        {
-            runTest(mode, 100000, 10);
-        }
-        SECTION("20 signers")
-        {
-            runTest(mode, 100000, 20);
-        }
-    };
-
-    SECTION("sqlite")
-    {
-        runTests(Config::TESTDB_ON_DISK_SQLITE);
-    }
-
-#ifdef USE_POSTGRES
-    SECTION("postgresql")
-    {
-        runTests(Config::TESTDB_POSTGRESQL);
-    }
-#endif
-}
-
 TEST_CASE("Load best offers benchmark", "[!hide][bestoffersbench]")
 {
     auto getTimeScope = [](Application& app, std::string const& phase) {
@@ -3532,7 +3204,7 @@ TEST_CASE("Load best offers benchmark", "[!hide][bestoffersbench]")
 
     SECTION("sqlite")
     {
-        runTest(Config::TESTDB_ON_DISK_SQLITE, 10, 5, 25000);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT, 10, 5, 25000);
     }
 }
 
@@ -3938,14 +3610,16 @@ TEST_CASE("LedgerTxn in memory order book", "[ledgertxn]")
         }
     };
 
-    SECTION("default")
+    SECTION("bucketlist")
     {
-        runTest(Config::TESTDB_DEFAULT);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT);
     }
 
-    SECTION("sqlite")
+    // In-memory mode is testing-only, but we should still make sure it works
+    SECTION("in-memory")
     {
-        runTest(Config::TESTDB_ON_DISK_SQLITE);
+        runTest(Config::TESTDB_IN_MEMORY);
     }
 
 #ifdef USE_POSTGRES
@@ -3956,7 +3630,7 @@ TEST_CASE("LedgerTxn in memory order book", "[ledgertxn]")
 #endif
 }
 
-TEST_CASE_VERSIONS("LedgerTxn bulk-load offers", "[ledgertxn]")
+TEST_CASE("Access deactivated entry", "[ledgertxn]")
 {
     auto runTest = [&](Config::TestDbMode mode) {
         VirtualClock clock;
@@ -3966,47 +3640,6 @@ TEST_CASE_VERSIONS("LedgerTxn bulk-load offers", "[ledgertxn]")
         le1.data.type(OFFER);
         le1.data.offer() = LedgerTestUtils::generateValidOfferEntry();
 
-        LedgerKey lk1 = LedgerEntryKey(le1);
-        auto lk2 = lk1;
-        lk2.offer().sellerID =
-            LedgerTestUtils::generateValidOfferEntry().sellerID;
-
-        {
-            LedgerTxn ltx(app->getLedgerTxnRoot());
-            ltx.create(le1);
-            ltx.commit();
-        }
-
-        for_all_versions(*app, [&]() {
-            app->getLedgerTxnRoot().prefetchClassic({lk1, lk2});
-            LedgerTxn ltx(app->getLedgerTxnRoot());
-            REQUIRE(ltx.load(lk1));
-        });
-    };
-
-    SECTION("sqlite")
-    {
-        runTest(Config::TESTDB_ON_DISK_SQLITE);
-    }
-
-#ifdef USE_POSTGRES
-    SECTION("postgresql")
-    {
-        runTest(Config::TESTDB_POSTGRESQL);
-    }
-#endif
-}
-
-TEST_CASE("Access deactivated entry", "[ledgertxn]")
-{
-    auto runTest = [&](Config::TestDbMode mode) {
-        VirtualClock clock;
-        auto app = createTestApplication(clock, getTestConfig(0, mode));
-
-        LedgerEntry le1;
-        le1.data.type(DATA);
-        le1.data.data() = LedgerTestUtils::generateValidDataEntry();
-
         LedgerKey lk1 = LedgerEntryKey(le1);
 
         {
@@ -4122,14 +3755,14 @@ TEST_CASE("Access deactivated entry", "[ledgertxn]")
         }
     };
 
-    SECTION("sqlite")
+    SECTION("bucketlist")
     {
-        runTest(Config::TESTDB_ON_DISK_SQLITE);
+        runTest(Config::TESTDB_BUCKET_DB_PERSISTENT);
     }
 
     SECTION("in-memory")
     {
-        runTest(Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        runTest(Config::TESTDB_IN_MEMORY);
     }
 
 #ifdef USE_POSTGRES
@@ -4185,7 +3818,7 @@ TEST_CASE("LedgerTxn generalized ledger entries", "[ledgertxn]")
 TEST_CASE("LedgerTxn best offers cache eviction", "[ledgertxn]")
 {
     VirtualClock clock;
-    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     auto app = createTestApplication(clock, cfg);
 
     auto buying = autocheck::generator<Asset>()(UINT32_MAX);
@@ -4402,7 +4035,7 @@ testPoolShareTrustLinesByAccountAndAsset(
     {
         VirtualClock clock;
         auto app = createTestApplication(
-            clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
+            clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
         for_versions_from(18, *app, [&] { testAtRoot(*app); });
     }
@@ -4411,7 +4044,7 @@ testPoolShareTrustLinesByAccountAndAsset(
     if (updates.size() > 1)
     {
         VirtualClock clock;
-        auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+        auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
         cfg.ENTRY_CACHE_SIZE = 0;
         auto app = createTestApplication(clock, cfg);
 
@@ -4422,7 +4055,7 @@ testPoolShareTrustLinesByAccountAndAsset(
     {
         VirtualClock clock;
         auto app = createTestApplication(
-            clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
+            clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
         for_versions_from(18, *app, [&] {
             testPoolShareTrustLinesByAccountAndAsset(
@@ -4450,7 +4083,7 @@ TEST_CASE_VERSIONS("LedgerTxn loadPoolShareTrustLinesByAccountAndAsset",
     {
         VirtualClock clock;
         auto app = createTestApplication(
-            clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
+            clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
         LedgerTxn ltx1(app->getLedgerTxnRoot());
         LedgerTxn ltx2(ltx1);
@@ -4463,7 +4096,7 @@ TEST_CASE_VERSIONS("LedgerTxn loadPoolShareTrustLinesByAccountAndAsset",
     {
         VirtualClock clock;
         auto app = createTestApplication(
-            clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
+            clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
         LedgerTxn ltx1(app->getLedgerTxnRoot());
         ltx1.getDelta();
@@ -4534,7 +4167,7 @@ TEST_CASE_VERSIONS("LedgerTxn loadPoolShareTrustLinesByAccountAndAsset",
 TEST_CASE("InMemoryLedgerTxn simulate buckets", "[ledgertxn]")
 {
     VirtualClock clock;
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     auto app = createTestApplication(clock, cfg);
 
@@ -4576,7 +4209,7 @@ TEST_CASE("InMemoryLedgerTxn simulate buckets", "[ledgertxn]")
 TEST_CASE("InMemoryLedgerTxn getOffersByAccountAndAsset", "[ledgertxn]")
 {
     VirtualClock clock;
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     auto app = createTestApplication(clock, cfg);
 
@@ -4620,7 +4253,7 @@ TEST_CASE("InMemoryLedgerTxn getPoolShareTrustLinesByAccountAndAsset",
           "[ledgertxn]")
 {
     VirtualClock clock;
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     auto app = createTestApplication(clock, cfg);
 
@@ -4669,7 +4302,7 @@ TEST_CASE_VERSIONS("InMemoryLedgerTxn close multiple ledgers with merges",
                    "[ledgertxn]")
 {
     VirtualClock clock;
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     auto app = createTestApplication(clock, cfg);
 
@@ -4693,7 +4326,7 @@ TEST_CASE_VERSIONS("InMemoryLedgerTxn close multiple ledgers with merges",
 TEST_CASE("InMemoryLedgerTxn filtering", "[ledgertxn]")
 {
     VirtualClock clock;
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     auto app = createTestApplication(clock, cfg);
     auto root = TestAccount::createRoot(*app);
diff --git a/src/main/ApplicationImpl.cpp b/src/main/ApplicationImpl.cpp
index ef2ced2f04..dc12cd4833 100644
--- a/src/main/ApplicationImpl.cpp
+++ b/src/main/ApplicationImpl.cpp
@@ -84,12 +84,8 @@ ApplicationImpl::ApplicationImpl(VirtualClock& clock, Config const& cfg)
     : mVirtualClock(clock)
     , mConfig(cfg)
-    // Allocate one worker to eviction when background eviction enabled
-    , mWorkerIOContext(mConfig.isUsingBackgroundEviction()
-                           ? mConfig.WORKER_THREADS - 1
-                           : mConfig.WORKER_THREADS)
-    , mEvictionIOContext(mConfig.isUsingBackgroundEviction()
-                             ? std::make_unique<asio::io_context>(1)
-                             : nullptr)
+    // Background eviction is always enabled now: reserve one worker thread
+    // for the eviction io_context and give the rest to general work
+    , mWorkerIOContext(mConfig.WORKER_THREADS - 1)
+    , mEvictionIOContext(std::make_unique<asio::io_context>(1))
     , mWork(std::make_unique<asio::io_context::work>(mWorkerIOContext))
     , mEvictionWork(
           mEvictionIOContext
@@ -157,19 +153,16 @@ ApplicationImpl::ApplicationImpl(VirtualClock& clock, Config const& cfg)
     auto t = mConfig.WORKER_THREADS;
     LOG_DEBUG(DEFAULT_LOG, "Application constructing (worker threads: {})", t);
 
-    if (mConfig.isUsingBackgroundEviction())
-    {
-        releaseAssert(mConfig.WORKER_THREADS > 0);
-        releaseAssert(mEvictionIOContext);
+    releaseAssert(mConfig.WORKER_THREADS > 0);
+    releaseAssert(mEvictionIOContext);
 
-        // Allocate one thread for Eviction scan
-        mEvictionThread = std::thread{[this]() {
-            runCurrentThreadWithMediumPriority();
-            mEvictionIOContext->run();
-        }};
+    // Allocate one thread for Eviction scan
+    mEvictionThread = std::thread{[this]() {
+        runCurrentThreadWithMediumPriority();
+        mEvictionIOContext->run();
+    }};
 
-        --t;
-    }
+    --t;
 
     while (t--)
     {
@@ -190,92 +183,17 @@ ApplicationImpl::ApplicationImpl(VirtualClock& clock, Config const& cfg)
 static void
 maybeRebuildLedger(Application& app, bool applyBuckets)
 {
-    std::set<LedgerEntryType> toDrop;
-    std::set<LedgerEntryType> toRebuild;
     auto& ps = app.getPersistentState();
-    auto bucketListDBEnabled = app.getConfig().isUsingBucketListDB();
-    for (auto let : xdr::xdr_traits<LedgerEntryType>::enum_values())
-    {
-        // If BucketListDB is enabled, drop all tables except for offers
-        LedgerEntryType t = static_cast<LedgerEntryType>(let);
-        if (let != OFFER && bucketListDBEnabled)
-        {
-            toDrop.emplace(t);
-            continue;
-        }
 
-        if (ps.shouldRebuildForType(t))
-        {
-            toRebuild.emplace(t);
-        }
-    }
-
-    if (!app.getConfig().MODE_USES_IN_MEMORY_LEDGER)
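+    // Offers are the only ledger entries still stored in SQL; everything
+    // else lives in the BucketList, so the offers table is the only one
+    // that can need a rebuild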
+    if (ps.shouldRebuildForOfferTable())
     {
         app.getDatabase().clearPreparedStatementCache();
         soci::transaction tx(app.getDatabase().getSession());
+        LOG_INFO(DEFAULT_LOG, "Dropping offers");
+        app.getLedgerTxnRoot().dropOffers(/*rebuild=*/true);
 
-        auto loopEntries = [&](auto const& entryTypeSet, bool shouldRebuild) {
-            for (auto let : entryTypeSet)
-            {
-                switch (let)
-                {
-                case ACCOUNT:
-                    LOG_INFO(DEFAULT_LOG, "Dropping accounts");
-                    app.getLedgerTxnRoot().dropAccounts(shouldRebuild);
-                    break;
-                case TRUSTLINE:
-                    LOG_INFO(DEFAULT_LOG, "Dropping trustlines");
-                    app.getLedgerTxnRoot().dropTrustLines(shouldRebuild);
-                    break;
-                case OFFER:
-                    LOG_INFO(DEFAULT_LOG, "Dropping offers");
-                    app.getLedgerTxnRoot().dropOffers(shouldRebuild);
-                    break;
-                case DATA:
-                    LOG_INFO(DEFAULT_LOG, "Dropping accountdata");
-                    app.getLedgerTxnRoot().dropData(shouldRebuild);
-                    break;
-                case CLAIMABLE_BALANCE:
-                    LOG_INFO(DEFAULT_LOG, "Dropping claimablebalances");
-                    app.getLedgerTxnRoot().dropClaimableBalances(shouldRebuild);
-                    break;
-                case LIQUIDITY_POOL:
-                    LOG_INFO(DEFAULT_LOG, "Dropping liquiditypools");
-                    app.getLedgerTxnRoot().dropLiquidityPools(shouldRebuild);
-                    break;
-                case CONTRACT_DATA:
-                    LOG_INFO(DEFAULT_LOG, "Dropping contractdata");
-                    app.getLedgerTxnRoot().dropContractData(shouldRebuild);
-                    break;
-                case CONTRACT_CODE:
-                    LOG_INFO(DEFAULT_LOG, "Dropping contractcode");
-                    app.getLedgerTxnRoot().dropContractCode(shouldRebuild);
-                    break;
-                case CONFIG_SETTING:
-                    LOG_INFO(DEFAULT_LOG, "Dropping configsettings");
-                    app.getLedgerTxnRoot().dropConfigSettings(shouldRebuild);
-                    break;
-                case TTL:
-                    LOG_INFO(DEFAULT_LOG, "Dropping ttl");
-                    app.getLedgerTxnRoot().dropTTL(shouldRebuild);
-                    break;
-                default:
-                    abort();
-                }
-            }
-        };
-
-        loopEntries(toRebuild, true);
-        loopEntries(toDrop, false);
         tx.commit();
 
-        // Nothing to apply, exit early
-        if (toRebuild.empty())
-        {
-            return;
-        }
-
         // No transaction is needed. ApplyBucketsWork breaks the apply into many
         // small chunks, each of which has its own transaction. If it fails at
         // some point in the middle, then rebuildledger will not be cleared so
@@ -284,10 +202,7 @@ maybeRebuildLedger(Application& app, bool applyBuckets)
         {
             LOG_INFO(DEFAULT_LOG,
                      "Rebuilding ledger tables by applying buckets");
-            auto filter = [&toRebuild](LedgerEntryType t) {
-                return toRebuild.find(t) != toRebuild.end();
-            };
-            if (!applyBucketsForLCL(app, filter))
+            if (!applyBucketsForLCL(app))
             {
                 throw std::runtime_error("Could not rebuild ledger tables");
             }
@@ -295,10 +210,7 @@ maybeRebuildLedger(Application& app, bool applyBuckets)
         }
     }
 
-    for (auto let : toRebuild)
-    {
-        ps.clearRebuildForType(let);
-    }
+    ps.clearRebuildForOfferTable();
 }
 
 void
@@ -331,29 +243,29 @@ ApplicationImpl::initialize(bool createNewDB, bool forceRebuild)
     mBanManager = BanManager::create(*this);
     mStatusManager = std::make_unique<StatusManager>();
 
-    if (getConfig().MODE_USES_IN_MEMORY_LEDGER)
+    if (mConfig.ENTRY_CACHE_SIZE < 20000)
     {
-        resetLedgerState();
+        LOG_WARNING(DEFAULT_LOG,
+                    "ENTRY_CACHE_SIZE({}) is below the recommended minimum "
+                    "of 20000",
+                    mConfig.ENTRY_CACHE_SIZE);
     }
-    else
-    {
-        if (mConfig.ENTRY_CACHE_SIZE < 20000)
-        {
-            LOG_WARNING(DEFAULT_LOG,
-                        "ENTRY_CACHE_SIZE({}) is below the recommended minimum "
-                        "of 20000",
-                        mConfig.ENTRY_CACHE_SIZE);
-        }
-        mLedgerTxnRoot = std::make_unique<LedgerTxnRoot>(
-            *this, mConfig.ENTRY_CACHE_SIZE, mConfig.PREFETCH_BATCH_SIZE
+    mLedgerTxnRoot = std::make_unique<LedgerTxnRoot>(
+        *this, mConfig.ENTRY_CACHE_SIZE, mConfig.PREFETCH_BATCH_SIZE
 #ifdef BEST_OFFER_DEBUGGING
-            ,
-            mConfig.BEST_OFFER_DEBUGGING_ENABLED
+        ,
+        mConfig.BEST_OFFER_DEBUGGING_ENABLED
 #endif
-        );
+    );
 
-        BucketListIsConsistentWithDatabase::registerInvariant(*this);
+#ifdef BUILD_TESTS
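+    // Test-only: in-memory mode layers a never-committing LedgerTxn on top
+    // of the real LedgerTxnRoot created above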
+    if (getConfig().MODE_USES_IN_MEMORY_LEDGER)
+    {
+        resetLedgerState();
     }
+#endif
+
+    BucketListIsConsistentWithDatabase::registerInvariant(*this);
 
     AccountSubEntriesCountIsValid::registerInvariant(*this);
     ConservationOfLumens::registerInvariant(*this);
@@ -386,6 +298,7 @@ ApplicationImpl::initialize(bool createNewDB, bool forceRebuild)
 void
 ApplicationImpl::resetLedgerState()
 {
+#ifdef BUILD_TESTS
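+    // Test-only: recreate the never-committing LedgerTxn so the in-memory
+    // ledger starts from a clean slate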
     if (getConfig().MODE_USES_IN_MEMORY_LEDGER)
     {
         mNeverCommittingLedgerTxn.reset();
@@ -395,12 +308,13 @@ ApplicationImpl::resetLedgerState()
 #endif
         );
         mNeverCommittingLedgerTxn = std::make_unique<InMemoryLedgerTxn>(
-            *mInMemoryLedgerTxnRoot, getDatabase());
+            *mInMemoryLedgerTxnRoot, getDatabase(), mLedgerTxnRoot.get());
     }
     else
+#endif
     {
         auto& lsRoot = getLedgerTxnRoot();
-        lsRoot.deleteObjectsModifiedOnOrAfterLedger(0);
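+        // Offers are the only SQL-backed ledger entries left, so clearing
+        // them resets all SQL ledger state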
+        lsRoot.deleteOffersModifiedOnOrAfterLedger(0);
     }
 }
 
@@ -419,10 +333,7 @@ ApplicationImpl::upgradeToCurrentSchemaAndMaybeRebuildLedger(bool applyBuckets,
     if (forceRebuild)
     {
         auto& ps = getPersistentState();
-        for (auto let : xdr::xdr_traits<LedgerEntryType>::enum_values())
-        {
-            ps.setRebuildForType(static_cast<LedgerEntryType>(let));
-        }
+        ps.setRebuildForOfferTable();
     }
 
     mDatabase->upgradeToCurrentSchema();
@@ -740,26 +651,13 @@ ApplicationImpl::validateAndLogConfig()
             "RUN_STANDALONE is not set");
     }
 
-    // EXPERIMENTAL_PRECAUTION_DELAY_META is only meaningful when there's a
-    // METADATA_OUTPUT_STREAM.  We only allow EXPERIMENTAL_PRECAUTION_DELAY_META
-    // on a captive core, without a persistent database; old-style ingestion
-    // which reads from the core database could do the delaying itself.
-    if (mConfig.METADATA_OUTPUT_STREAM != "" &&
-        mConfig.EXPERIMENTAL_PRECAUTION_DELAY_META && !mConfig.isInMemoryMode())
+    if (mConfig.METADATA_OUTPUT_STREAM == "" &&
+        mConfig.EXPERIMENTAL_PRECAUTION_DELAY_META)
     {
-        throw std::invalid_argument(
-            "Using a METADATA_OUTPUT_STREAM with "
-            "EXPERIMENTAL_PRECAUTION_DELAY_META set to true "
-            "requires --in-memory");
+        CLOG_WARNING(Tx, "EXPERIMENTAL_PRECAUTION_DELAY_META is ignored "
+                         "because METADATA_OUTPUT_STREAM is not set");
     }
 
-    if (mConfig.isInMemoryMode())
-    {
-        CLOG_WARNING(
-            Bucket,
-            "in-memory mode is enabled. This feature is deprecated! Node "
-            "may see performance degredation and lose sync with the network.");
-    }
     if (!mDatabase->isSqlite())
     {
         CLOG_WARNING(Database,
@@ -768,86 +666,35 @@ ApplicationImpl::validateAndLogConfig()
                      "release. Please use sqlite3 for non-ledger state data.");
     }
 
-    if (mConfig.DEPRECATED_SQL_LEDGER_STATE)
+    auto pageSizeExp = mConfig.BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT;
+    if (pageSizeExp != 0)
     {
-        if (mPersistentState->getState(PersistentState::kDBBackend) ==
-            BucketIndex::DB_BACKEND_STATE)
+        // If the page size is less than 256 bytes, it is essentially
+        // indexing individual keys, so page size should be set to 0
+        // instead.
+        if (pageSizeExp < 8)
         {
             throw std::invalid_argument(
-                "To downgrade to DEPRECATED_SQL_LEDGER_STATE, run "
-                "stellar-core new-db.");
-        }
-
-        CLOG_WARNING(
-            Bucket,
-            "SQL for ledger state is enabled. This feature is deprecated! Node "
-            "may see performance degredation and lose sync with the network.");
-    }
-    else
-    {
-        if (mConfig.isUsingBucketListDB())
-        {
-            mPersistentState->setState(PersistentState::kDBBackend,
-                                       BucketIndex::DB_BACKEND_STATE);
-            auto pageSizeExp = mConfig.BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT;
-            if (pageSizeExp != 0)
-            {
-                // If the page size is less than 256 bytes, it is essentially
-                // indexing individual keys, so page size should be set to 0
-                // instead.
-                if (pageSizeExp < 8)
-                {
-                    throw std::invalid_argument(
-                        "BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT "
-                        "must be at least 8 or set to 0 for individual entry "
-                        "indexing");
-                }
-
-                // Check if pageSize will cause overflow
-                if (pageSizeExp > 31)
-                {
-                    throw std::invalid_argument(
-                        "BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT "
-                        "must be less than 32");
-                }
-            }
-
-            CLOG_INFO(Bucket,
-                      "BucketListDB enabled: pageSizeExponent: {} indexCutOff: "
-                      "{}MB, persist indexes: {}",
-                      pageSizeExp, mConfig.BUCKETLIST_DB_INDEX_CUTOFF,
-                      mConfig.isPersistingBucketListDBIndexes());
+                "BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT "
+                "must be at least 8 or set to 0 for individual entry "
+                "indexing");
         }
-        else
-        {
-            CLOG_WARNING(
-                Bucket,
-                "DEPRECATED_SQL_LEDGER_STATE set to false but "
-                "deprecated SQL ledger state is active. To disable deprecated "
-                "SQL ledger state, "
-                "MODE_ENABLES_BUCKETLIST must be set and --in-memory flag "
-                "must not be used.");
-        }
-    }
 
-    if (mConfig.BACKGROUND_EVICTION_SCAN)
-    {
-        if (!mConfig.isUsingBucketListDB())
+        // Check if pageSize will cause overflow
+        if (pageSizeExp > 31)
         {
             throw std::invalid_argument(
-                "BACKGROUND_EVICTION_SCAN set to true but "
-                "DEPRECATED_SQL_LEDGER_STATE is set to true. "
-                "DEPRECATED_SQL_LEDGER_STATE must be set to false to enable "
-                "background eviction.");
-        }
-
-        if (mConfig.WORKER_THREADS < 2)
-        {
-            throw std::invalid_argument("BACKGROUND_EVICTION_SCAN requires "
-                                        "WORKER_THREADS > 1");
+                "BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT "
+                "must be less than 32");
         }
     }
 
+    CLOG_INFO(Bucket,
+              "BucketListDB enabled: pageSizeExponent: {} indexCutOff: "
+              "{}MB, persist indexes: {}",
+              pageSizeExp, mConfig.BUCKETLIST_DB_INDEX_CUTOFF,
+              mConfig.BUCKETLIST_DB_PERSIST_INDEX);
+
     if (mConfig.HTTP_QUERY_PORT != 0)
     {
         if (isNetworkedValidator)
@@ -863,13 +710,6 @@ ApplicationImpl::validateAndLogConfig()
                 "HTTP_QUERY_PORT must be different from HTTP_PORT");
         }
 
-        if (!mConfig.isUsingBucketListDB())
-        {
-            throw std::invalid_argument(
-                "HTTP_QUERY_PORT requires DEPRECATED_SQL_LEDGER_STATE to be "
-                "false");
-        }
-
         if (mConfig.QUERY_THREAD_POOL_SIZE == 0)
         {
             throw std::invalid_argument(
@@ -877,13 +717,6 @@ ApplicationImpl::validateAndLogConfig()
         }
     }
 
-    if (isNetworkedValidator && mConfig.isInMemoryMode())
-    {
-        throw std::invalid_argument(
-            "In-memory mode is set, NODE_IS_VALIDATOR is set, "
-            "and RUN_STANDALONE is not set");
-    }
-
     if (getHistoryArchiveManager().hasAnyWritableHistoryArchive())
     {
         if (!mConfig.modeStoresAllHistory())
@@ -1632,7 +1465,14 @@ AbstractLedgerTxnParent&
 ApplicationImpl::getLedgerTxnRoot()
 {
     releaseAssert(threadIsMain());
-    return mConfig.MODE_USES_IN_MEMORY_LEDGER ? *mNeverCommittingLedgerTxn
-                                              : *mLedgerTxnRoot;
+
+#ifdef BUILD_TESTS
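+    // In-memory mode (test-only) reads through the never-committing
+    // LedgerTxn instead of the SQL-backed root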
+    if (mConfig.MODE_USES_IN_MEMORY_LEDGER)
+    {
+        return *mNeverCommittingLedgerTxn;
+    }
+#endif
+
+    return *mLedgerTxnRoot;
 }
 }
diff --git a/src/main/ApplicationImpl.h b/src/main/ApplicationImpl.h
index d195e04774..4dcc71266c 100644
--- a/src/main/ApplicationImpl.h
+++ b/src/main/ApplicationImpl.h
@@ -189,8 +189,10 @@ class ApplicationImpl : public Application
     // is held in the never-committing LedgerTxn in its entirety -- so if it
     // ever grows beyond RAM-size you need to use a mode with some sort of
     // database on secondary storage.
+#ifdef BUILD_TESTS
     std::unique_ptr<InMemoryLedgerTxnRoot> mInMemoryLedgerTxnRoot;
     std::unique_ptr<InMemoryLedgerTxn> mNeverCommittingLedgerTxn;
+#endif
 
     std::unique_ptr<CommandHandler> mCommandHandler;
 
diff --git a/src/main/ApplicationUtils.cpp b/src/main/ApplicationUtils.cpp
index 5f33fce2a5..c89559e56a 100644
--- a/src/main/ApplicationUtils.cpp
+++ b/src/main/ApplicationUtils.cpp
@@ -118,79 +118,9 @@ minimalDbPath(Config const& cfg)
     return dpath;
 }
 
-void
-setupMinimalDBForInMemoryMode(Config const& cfg, uint32_t startAtLedger)
-{
-    releaseAssertOrThrow(cfg.isInMemoryMode());
-
-    VirtualClock clock;
-    Application::pointer app;
-
-    // Look for an existing minimal database, and see if it's possible to
-    // restore ledger state from buckets. If it is not possible, reset the
-    // existing database back to genesis. If the minimal database does not
-    // exist, create a new one.
-    bool found = false;
-
-    auto cfgToCheckDB = cfg;
-    cfgToCheckDB.METADATA_OUTPUT_STREAM = "";
-
-    if (std::filesystem::exists(minimalDbPath(cfg)))
-    {
-        app = Application::create(clock, cfgToCheckDB, /* newDB */ false);
-        found = true;
-    }
-    else
-    {
-        LOG_INFO(DEFAULT_LOG, "Minimal database not found, creating one...");
-        app = Application::create(clock, cfgToCheckDB, /* newDB */ true);
-    }
-
-    // Rebuild the state from scratch if:
-    //  - --start-at-ledger was not provided
-    //  - target catchup ledger is before LCL
-    //  - target catchup ledger is too far ahead of LCL
-    // In all other cases, attempt restoring the ledger states via
-    // local bucket application
-    if (found)
-    {
-        LOG_INFO(DEFAULT_LOG, "Found the existing minimal database");
-
-        // DB state might be set to 0 if core previously exited while rebuilding
-        // state. In this case, we want to rebuild the DB from scratch
-        bool rebuildDB =
-            app->getLedgerManager().getLastClosedLedgerHAS().currentLedger <
-            LedgerManager::GENESIS_LEDGER_SEQ;
-
-        if (!rebuildDB)
-        {
-            // Ledger state is not yet ready during this setup step
-            app->getLedgerManager().loadLastKnownLedger(
-                /* restoreBucketlist */ false, /* isLedgerStateReady */ false);
-            auto lcl = app->getLedgerManager().getLastClosedLedgerNum();
-            LOG_INFO(DEFAULT_LOG, "Current in-memory state, got LCL: {}", lcl);
-            rebuildDB =
-                !canRebuildInMemoryLedgerFromBuckets(startAtLedger, lcl);
-        }
-
-        if (rebuildDB)
-        {
-            LOG_INFO(DEFAULT_LOG, "Cannot restore the in-memory state, "
-                                  "rebuilding the state from scratch");
-            app->resetDBForInMemoryMode();
-        }
-    }
-}
-
 Application::pointer
-setupApp(Config& cfg, VirtualClock& clock, uint32_t startAtLedger,
-         std::string const& startAtHash)
+setupApp(Config& cfg, VirtualClock& clock)
 {
-    if (cfg.isInMemoryMode())
-    {
-        setupMinimalDBForInMemoryMode(cfg, startAtLedger);
-    }
-
     LOG_INFO(DEFAULT_LOG, "Starting stellar-core {}", STELLAR_CORE_VERSION);
     Application::pointer app;
     app = Application::create(clock, cfg, false);
@@ -202,10 +132,10 @@ setupApp(Config& cfg, VirtualClock& clock, uint32_t startAtLedger,
     // With in-memory mode, ledger state is not yet ready during this setup step
     app->getLedgerManager().loadLastKnownLedger(
         /* restoreBucketlist */ false,
-        /* isLedgerStateReady */ !cfg.isInMemoryMode());
+        /* isLedgerStateReady */ !cfg.MODE_USES_IN_MEMORY_LEDGER);
     auto lcl = app->getLedgerManager().getLastClosedLedgerHeader();
 
-    if (cfg.isInMemoryMode() &&
+    if (cfg.MODE_USES_IN_MEMORY_LEDGER &&
         lcl.header.ledgerSeq == LedgerManager::GENESIS_LEDGER_SEQ)
     {
         // If ledger is genesis, rebuild genesis state from buckets
@@ -215,67 +145,6 @@ setupApp(Config& cfg, VirtualClock& clock, uint32_t startAtLedger,
         }
     }
 
-    bool doCatchupForInMemoryMode =
-        cfg.isInMemoryMode() && startAtLedger != 0 && !startAtHash.empty();
-    if (doCatchupForInMemoryMode)
-    {
-        // At this point, setupApp has either confirmed that we can rebuild from
-        // the existing buckets, or reset the DB to genesis
-        if (lcl.header.ledgerSeq != LedgerManager::GENESIS_LEDGER_SEQ)
-        {
-            auto lclHashStr = binToHex(lcl.hash);
-            if (lcl.header.ledgerSeq == startAtLedger &&
-                lclHashStr != startAtHash)
-            {
-                LOG_ERROR(DEFAULT_LOG,
-                          "Provided hash {} does not agree with stored hash {}",
-                          startAtHash, lclHashStr);
-                return nullptr;
-            }
-
-            auto has = app->getLedgerManager().getLastClosedLedgerHAS();
-
-            // Collect bucket references to pass to catchup _before_ starting
-            // the app, which may trigger garbage collection
-            std::set<std::shared_ptr<Bucket>> retained;
-            for (auto const& b : has.allBuckets())
-            {
-                auto bPtr =
-                    app->getBucketManager().getBucketByHash(hexToBin256(b));
-                releaseAssert(bPtr);
-                retained.insert(bPtr);
-            }
-
-            // Start the app with LCL set to 0
-            app->getLedgerManager().setupInMemoryStateRebuild();
-            app->start();
-
-            // Set Herder to track the actual LCL
-            app->getHerder().setTrackingSCPState(lcl.header.ledgerSeq,
-                                                 lcl.header.scpValue, true);
-
-            // Schedule the catchup work that will rebuild state
-            auto cc = CatchupConfiguration(has, lcl);
-            app->getLedgerManager().startCatchup(cc, /* archive */ nullptr,
-                                                 retained);
-        }
-        else
-        {
-            LedgerNumHashPair pair;
-            pair.first = startAtLedger;
-            pair.second = std::optional<Hash>(hexToBin256(startAtHash));
-            auto mode = CatchupConfiguration::Mode::OFFLINE_BASIC;
-            Json::Value catchupInfo;
-            int res =
-                catchup(app, CatchupConfiguration{pair, 0, mode}, catchupInfo,
-                        /* archive */ nullptr);
-            if (res != 0)
-            {
-                return nullptr;
-            }
-        }
-    }
-
     return app;
 }
 
@@ -314,8 +183,7 @@ runApp(Application::pointer app)
 }
 
 bool
-applyBucketsForLCL(Application& app,
-                   std::function<bool(LedgerEntryType)> onlyApply)
+applyBucketsForLCL(Application& app)
 {
     auto has = app.getLedgerManager().getLastClosedLedgerHAS();
     auto lclHash =
@@ -329,9 +197,9 @@ applyBucketsForLCL(Application& app,
         maxProtocolVersion = currentLedger->ledgerVersion;
     }
 
-    std::map<std::string, std::shared_ptr<Bucket>> buckets;
+    std::map<std::string, std::shared_ptr<LiveBucket>> buckets;
     auto work = app.getWorkScheduler().scheduleWork<ApplyBucketsWork>(
-        buckets, has, maxProtocolVersion, onlyApply);
+        buckets, has, maxProtocolVersion);
 
     while (app.getClock().crank(true) && !work->isDone())
         ;
@@ -339,12 +207,6 @@ applyBucketsForLCL(Application& app,
     return work->getState() == BasicWork::State::WORK_SUCCESS;
 }
 
-bool
-applyBucketsForLCL(Application& app)
-{
-    return applyBucketsForLCL(app, [](LedgerEntryType) { return true; });
-}
-
 void
 httpCommand(std::string const& command, unsigned short port)
 {
@@ -574,11 +436,11 @@ struct StateArchivalMetric
 
 static void
 processArchivalMetrics(
-    std::shared_ptr<Bucket const> const b,
+    std::shared_ptr<LiveBucket const> const b,
     UnorderedMap<LedgerKey, StateArchivalMetric>& ledgerEntries,
     UnorderedMap<LedgerKey, std::pair<StateArchivalMetric, uint32_t>>& ttls)
 {
-    for (BucketInputIterator in(b); in; ++in)
+    for (LiveBucketInputIterator in(b); in; ++in)
     {
         auto const& be = *in;
         bool isDead = be.type() == DEADENTRY;
@@ -647,7 +509,7 @@ dumpStateArchivalStatistics(Config cfg)
     HistoryArchiveState has = lm.getLastClosedLedgerHAS();
 
     std::vector<Hash> hashes;
-    for (uint32_t i = 0; i < BucketList::kNumLevels; ++i)
+    for (uint32_t i = 0; i < LiveBucketList::kNumLevels; ++i)
     {
         HistoryStateBucket const& hsb = has.currentBuckets.at(i);
         hashes.emplace_back(hexToBin256(hsb.curr));
@@ -665,7 +527,7 @@ dumpStateArchivalStatistics(Config cfg)
         {
             continue;
         }
-        auto b = bm.getBucketByHash(hash);
+        auto b = bm.getLiveBucketByHash(hash);
         if (!b)
         {
             throw std::runtime_error(std::string("missing bucket: ") +
@@ -720,7 +582,7 @@ dumpStateArchivalStatistics(Config cfg)
         }
     }
 
-    CLOG_INFO(Bucket, "BucketList total bytes: {}", blSize);
+    CLOG_INFO(Bucket, "Live BucketList total bytes: {}", blSize);
     CLOG_INFO(Bucket,
               "Live Temporary Entries: Newest bytes {} ({}%), Outdated bytes "
               "{} ({}%)",
@@ -929,7 +791,7 @@ loadXdr(Config cfg, std::string const& bucketFile)
     Application::pointer app = Application::create(clock, cfg, false);
 
     uint256 zero;
-    Bucket bucket(bucketFile, zero, nullptr);
+    LiveBucket bucket(bucketFile, zero, nullptr);
     bucket.apply(*app);
 }
 
diff --git a/src/main/ApplicationUtils.h b/src/main/ApplicationUtils.h
index 30d2cb0fed..ac0848bdb6 100644
--- a/src/main/ApplicationUtils.h
+++ b/src/main/ApplicationUtils.h
@@ -15,9 +15,7 @@ namespace stellar
 class CatchupConfiguration;
 
 // Create application and validate its configuration
-Application::pointer setupApp(Config& cfg, VirtualClock& clock,
-                              uint32_t startAtLedger,
-                              std::string const& startAtHash);
+Application::pointer setupApp(Config& cfg, VirtualClock& clock);
 int runApp(Application::pointer app);
 void setForceSCPFlag();
 void initializeDatabase(Config cfg);
@@ -57,8 +55,6 @@ int catchup(Application::pointer app, CatchupConfiguration cc,
 // Rebuild ledger state based on the buckets. Ensure ledger state is properly
 // reset before calling this function.
 bool applyBucketsForLCL(Application& app);
-bool applyBucketsForLCL(Application& app,
-                        std::function<bool(LedgerEntryType)> onlyApply);
 int publish(Application::pointer app);
 std::string minimalDBForInMemoryMode(Config const& cfg);
 bool canRebuildInMemoryLedgerFromBuckets(uint32_t startAtLedger, uint32_t lcl);
diff --git a/src/main/CommandHandler.cpp b/src/main/CommandHandler.cpp
index fd8d9e3034..7a198e8c40 100644
--- a/src/main/CommandHandler.cpp
+++ b/src/main/CommandHandler.cpp
@@ -82,8 +82,7 @@ CommandHandler::CommandHandler(Application& app) : mApp(app)
             app.getClock().getIOContext(), ipStr, mApp.getConfig().HTTP_PORT,
             httpMaxClient);
 
-        if (mApp.getConfig().HTTP_QUERY_PORT &&
-            mApp.getConfig().isUsingBucketListDB())
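+        // BucketListDB is now the only ledger-state backend, so the query
+        // server only needs HTTP_QUERY_PORT to be set.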
+        if (mApp.getConfig().HTTP_QUERY_PORT)
         {
             mQueryServer = std::make_unique<QueryServer>(
                 ipStr, mApp.getConfig().HTTP_QUERY_PORT, httpMaxClient,
diff --git a/src/main/CommandLine.cpp b/src/main/CommandLine.cpp
index c5aada5c6b..842de40f2e 100644
--- a/src/main/CommandLine.cpp
+++ b/src/main/CommandLine.cpp
@@ -334,54 +334,6 @@ maybeSetMetadataOutputStream(Config& cfg, std::string const& stream)
     }
 }
 
-void
-maybeEnableInMemoryMode(Config& config, bool inMemory, uint32_t startAtLedger,
-                        std::string const& startAtHash, bool persistMinimalData)
-{
-    // First, ensure user parameters are valid
-    if (!inMemory)
-    {
-        if (startAtLedger != 0)
-        {
-            throw std::runtime_error("--start-at-ledger requires --in-memory");
-        }
-        if (!startAtHash.empty())
-        {
-            throw std::runtime_error("--start-at-hash requires --in-memory");
-        }
-        return;
-    }
-    if (startAtLedger != 0 && startAtHash.empty())
-    {
-        throw std::runtime_error("--start-at-ledger requires --start-at-hash");
-    }
-    else if (startAtLedger == 0 && !startAtHash.empty())
-    {
-        throw std::runtime_error("--start-at-hash requires --start-at-ledger");
-    }
-
-    // Adjust configs for live in-memory-replay mode
-    config.setInMemoryMode();
-
-    if (startAtLedger != 0 && !startAtHash.empty())
-    {
-        config.MODE_AUTO_STARTS_OVERLAY = false;
-    }
-
-    // Set database to a small sqlite database used to store minimal data needed
-    // to restore the ledger state
-    if (persistMinimalData)
-    {
-        config.DATABASE = SecretValue{minimalDBForInMemoryMode(config)};
-        config.MODE_STORES_HISTORY_LEDGERHEADERS = true;
-        // Since this mode stores historical data (needed to restore
-        // ledger state in certain scenarios), set maintenance to run
-        // aggressively so that we only store a few ledgers worth of data
-        config.AUTOMATIC_MAINTENANCE_PERIOD = std::chrono::seconds(30);
-        config.AUTOMATIC_MAINTENANCE_COUNT = MAINTENANCE_LEDGER_COUNT;
-    }
-}
-
 clara::Opt
 ledgerHashParser(std::string& ledgerHash)
 {
@@ -396,29 +348,6 @@ forceUntrustedCatchup(bool& force)
         "force unverified catchup");
 }
 
-clara::Opt
-inMemoryParser(bool& inMemory)
-{
-    return clara::Opt{inMemory}["--in-memory"](
-        "(DEPRECATED) store working ledger in memory rather than database");
-}
-
-clara::Opt
-startAtLedgerParser(uint32_t& startAtLedger)
-{
-    return clara::Opt{startAtLedger, "LEDGER"}["--start-at-ledger"](
-        "(DEPRECATED) start in-memory run with replay from historical ledger "
-        "number");
-}
-
-clara::Opt
-startAtHashParser(std::string& startAtHash)
-{
-    return clara::Opt{startAtHash, "HASH"}["--start-at-hash"](
-        "(DEPRECATED) start in-memory run with replay from historical ledger "
-        "hash");
-}
-
 clara::Opt
 filterQueryParser(std::optional<std::string>& filterQuery)
 {
@@ -857,8 +786,8 @@ runCatchup(CommandLineArgs const& args)
          catchupArchiveParser,
          trustedCheckpointHashesParser(trustedCheckpointHashesFile),
          outputFileParser(outputFile), disableBucketGCParser(disableBucketGC),
-         validationParser(completeValidation), inMemoryParser(inMemory),
-         ledgerHashParser(hash), forceUntrustedCatchup(forceUntrusted),
+         validationParser(completeValidation), ledgerHashParser(hash),
+         forceUntrustedCatchup(forceUntrusted),
          metadataOutputStreamParser(stream), forceBackParser(forceBack)},
         [&] {
             auto config = configOption.getConfig();
@@ -879,10 +808,6 @@ runCatchup(CommandLineArgs const& args)
                 config.AUTOMATIC_MAINTENANCE_COUNT = MAINTENANCE_LEDGER_COUNT;
             }
 
-            // --start-at-ledger and --start-at-hash aren't allowed in catchup,
-            // so pass defaults values
-            maybeEnableInMemoryMode(config, inMemory, 0, "",
-                                    /* persistMinimalData */ false);
             maybeSetMetadataOutputStream(config, stream);
 
             VirtualClock clock(VirtualClock::REAL_TIME);
@@ -1024,9 +949,8 @@ runWriteVerifiedCheckpointHashes(CommandLineArgs const& args)
             VirtualClock clock(VirtualClock::REAL_TIME);
             auto cfg = configOption.getConfig();
 
-            // Set up for quick in-memory no-catchup mode.
+            // Set up for quick no-catchup mode.
             cfg.QUORUM_INTERSECTION_CHECKER = false;
-            cfg.setInMemoryMode();
             cfg.MODE_DOES_CATCHUP = false;
 
             auto app = Application::create(clock, cfg, false);
@@ -1226,25 +1150,13 @@ int
 runNewDB(CommandLineArgs const& args)
 {
     CommandLine::ConfigOption configOption;
-    bool minimalForInMemoryMode = false;
-
-    auto minimalDBParser = [](bool& minimalForInMemoryMode) {
-        return clara::Opt{
-            minimalForInMemoryMode}["--minimal-for-in-memory-mode"](
-            "Reset the special database used only for in-memory mode (see "
-            "--in-memory flag");
-    };
 
     return runWithHelp(args,
-                       {configurationParser(configOption),
-                        minimalDBParser(minimalForInMemoryMode)},
+                       {
+                           configurationParser(configOption),
+                       },
                        [&] {
                            auto cfg = configOption.getConfig();
-                           if (minimalForInMemoryMode)
-                           {
-                               cfg.DATABASE =
-                                   SecretValue{minimalDBForInMemoryMode(cfg)};
-                           }
                            initializeDatabase(cfg);
                            return 0;
                        });
@@ -1520,18 +1432,14 @@ run(CommandLineArgs const& args)
     CommandLine::ConfigOption configOption;
     auto disableBucketGC = false;
     std::string stream;
-    bool inMemory = false;
     bool waitForConsensus = false;
-    uint32_t startAtLedger = 0;
-    std::string startAtHash;
 
     return runWithHelp(
         args,
         {configurationParser(configOption),
          disableBucketGCParser(disableBucketGC),
-         metadataOutputStreamParser(stream), inMemoryParser(inMemory),
-         waitForConsensusParser(waitForConsensus),
-         startAtLedgerParser(startAtLedger), startAtHashParser(startAtHash)},
+         metadataOutputStreamParser(stream),
+         waitForConsensusParser(waitForConsensus)},
         [&] {
             Config cfg;
             std::shared_ptr<VirtualClock> clock;
@@ -1549,14 +1457,10 @@ run(CommandLineArgs const& args)
                 {
                     cfg.DATABASE = SecretValue{"sqlite3://:memory:"};
                     cfg.MODE_STORES_HISTORY_MISC = false;
-                    cfg.MODE_USES_IN_MEMORY_LEDGER = false;
                     cfg.MODE_ENABLES_BUCKETLIST = false;
                     cfg.PREFETCH_BATCH_SIZE = 0;
                 }
 
-                maybeEnableInMemoryMode(cfg, inMemory, startAtLedger,
-                                        startAtHash,
-                                        /* persistMinimalData */ true);
                 maybeSetMetadataOutputStream(cfg, stream);
                 cfg.FORCE_SCP =
                     cfg.NODE_IS_VALIDATOR ? !waitForConsensus : false;
@@ -1600,7 +1504,7 @@ run(CommandLineArgs const& args)
-                // Note that when in in-memory mode, additional setup may be
-                // required (such as database reset, catchup, etc)
                 clock = std::make_shared<VirtualClock>(clockMode);
-                app = setupApp(cfg, *clock, startAtLedger, startAtHash);
+                app = setupApp(cfg, *clock);
                 if (!app)
                 {
                     LOG_ERROR(DEFAULT_LOG,
diff --git a/src/main/Config.cpp b/src/main/Config.cpp
index 58c1eb8dc8..316d16e264 100644
--- a/src/main/Config.cpp
+++ b/src/main/Config.cpp
@@ -121,7 +121,6 @@ Config::Config() : NODE_SEED(SecretKey::random())
 
     // non configurable
     MODE_ENABLES_BUCKETLIST = true;
-    MODE_USES_IN_MEMORY_LEDGER = false;
     MODE_STORES_HISTORY_MISC = true;
     MODE_STORES_HISTORY_LEDGERHEADERS = true;
     MODE_DOES_CATCHUP = true;
@@ -163,11 +162,9 @@ Config::Config() : NODE_SEED(SecretKey::random())
     CATCHUP_RECENT = 0;
     EXPERIMENTAL_PRECAUTION_DELAY_META = false;
     EXPERIMENTAL_BACKGROUND_OVERLAY_PROCESSING = false;
-    DEPRECATED_SQL_LEDGER_STATE = false;
     BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT = 14; // 2^14 == 16 kb
     BUCKETLIST_DB_INDEX_CUTOFF = 20;             // 20 mb
     BUCKETLIST_DB_PERSIST_INDEX = true;
-    BACKGROUND_EVICTION_SCAN = true;
     PUBLISH_TO_ARCHIVE_DELAY = std::chrono::seconds{0};
     // automatic maintenance settings:
     // short and prime with 1 hour which will cause automatic maintenance to
@@ -310,6 +307,7 @@ Config::Config() : NODE_SEED(SecretKey::random())
 
 #ifdef BUILD_TESTS
     TEST_CASES_ENABLED = false;
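+    // The in-memory ledger is now a test-only facility (see Config.h).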
+    MODE_USES_IN_MEMORY_LEDGER = false;
 #endif
 
 #ifdef BEST_OFFER_DEBUGGING
@@ -1069,28 +1067,37 @@ Config::processConfig(std::shared_ptr<cpptoml::table> t)
                      EXPERIMENTAL_BACKGROUND_OVERLAY_PROCESSING =
                          readBool(item);
                  }},
+                // TODO: Flags are no longer supported, remove in next release.
                 {"BACKGROUND_EVICTION_SCAN",
-                 [&]() { BACKGROUND_EVICTION_SCAN = readBool(item); }},
-                // TODO: Flag is no longer supported, remove in next release.
+                 [&]() {
+                     CLOG_WARNING(
+                         Bucket,
+                         "BACKGROUND_EVICTION_SCAN is deprecated and ignored. "
+                         "Please remove this from config");
+                 }},
                 {"EXPERIMENTAL_BACKGROUND_EVICTION_SCAN",
                  [&]() {
                      CLOG_WARNING(
                          Bucket,
                          "EXPERIMENTAL_BACKGROUND_EVICTION_SCAN is deprecated "
                          "and "
-                         "is ignored. Use BACKGROUND_EVICTION_SCAN instead");
+                         "is ignored. Please remove from config");
                  }},
                 {"DEPRECATED_SQL_LEDGER_STATE",
-                 [&]() { DEPRECATED_SQL_LEDGER_STATE = readBool(item); }},
+                 [&]() {
+                     CLOG_WARNING(
+                         Bucket,
+                         "DEPRECATED_SQL_LEDGER_STATE is deprecated and "
+                         "ignored. Please remove from config");
+                 }},
                 // Still support EXPERIMENTAL_BUCKETLIST_DB* flags for
                 // captive-core for 21.0 release, remove in 21.1 release
                 {"EXPERIMENTAL_BUCKETLIST_DB",
                  [&]() {
-                     DEPRECATED_SQL_LEDGER_STATE = !readBool(item);
                      CLOG_WARNING(
                          Bucket,
-                         "EXPERIMENTAL_BUCKETLIST_DB flag is deprecated, "
-                         "use DEPRECATED_SQL_LEDGER_STATE=false instead.");
+                         "EXPERIMENTAL_BUCKETLIST_DB flag is deprecated. "
+                         "please remove from config");
                  }},
                 {"EXPERIMENTAL_BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT",
                  [&]() {
@@ -1568,8 +1575,8 @@ Config::processConfig(std::shared_ptr<cpptoml::table> t)
                  }},
                 {"TESTING_STARTING_EVICTION_SCAN_LEVEL",
                  [&]() {
-                     TESTING_STARTING_EVICTION_SCAN_LEVEL =
-                         readInt<uint32_t>(item, 1, BucketList::kNumLevels - 1);
+                     TESTING_STARTING_EVICTION_SCAN_LEVEL = readInt<uint32_t>(
+                         item, 1, LiveBucketList::kNumLevels - 1);
                  }},
                 {"TESTING_MAX_ENTRIES_TO_ARCHIVE",
                  [&]() {
@@ -1689,33 +1696,11 @@ Config::processConfig(std::shared_ptr<cpptoml::table> t)
         // Validators default to starting the network from local state
         FORCE_SCP = NODE_IS_VALIDATOR;
 
-        // Require either DEPRECATED_SQL_LEDGER_STATE or
-        // EXPERIMENTAL_BUCKETLIST_DB to be backwards compatible with horizon
-        // and RPC, but do not allow both.
-        if (!t->contains("DEPRECATED_SQL_LEDGER_STATE") &&
-            !t->contains("EXPERIMENTAL_BUCKETLIST_DB"))
-        {
-            std::string msg =
-                "Invalid configuration: "
-                "DEPRECATED_SQL_LEDGER_STATE not set. Default setting is FALSE "
-                "and is appropriate for most nodes.";
-            throw std::runtime_error(msg);
-        }
         // Only allow one version of all BucketListDB flags, either the
         // deprecated flag or new flag, but not both.
-        else if (t->contains("DEPRECATED_SQL_LEDGER_STATE") &&
-                 t->contains("EXPERIMENTAL_BUCKETLIST_DB"))
-        {
-            std::string msg =
-                "Invalid configuration: EXPERIMENTAL_BUCKETLIST_DB and "
-                "DEPRECATED_SQL_LEDGER_STATE must not both be set. "
-                "EXPERIMENTAL_BUCKETLIST_DB is deprecated, use "
-                "DEPRECATED_SQL_LEDGER_STATE only.";
-            throw std::runtime_error(msg);
-        }
-        else if (t->contains(
-                     "EXPERIMENTAL_BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT") &&
-                 t->contains("BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT"))
+        if (t->contains(
+                "EXPERIMENTAL_BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT") &&
+            t->contains("BUCKETLIST_DB_INDEX_PAGE_SIZE_EXPONENT"))
         {
             std::string msg =
                 "Invalid configuration: "
@@ -1748,14 +1733,6 @@ Config::processConfig(std::shared_ptr<cpptoml::table> t)
             throw std::runtime_error(msg);
         }
 
-        // If DEPRECATED_SQL_LEDGER_STATE is set to false and
-        // BACKGROUND_EVICTION_SCAN is not set, override default value to false
-        // so that nodes still running SQL ledger don't crash on startup
-        if (!isUsingBucketListDB() && !t->contains("BACKGROUND_EVICTION_SCAN"))
-        {
-            BACKGROUND_EVICTION_SCAN = false;
-        }
-
         // process elements that potentially depend on others
         if (t->contains("VALIDATORS"))
         {
@@ -2263,54 +2240,12 @@ Config::getExpectedLedgerCloseTime() const
     return Herder::EXP_LEDGER_TIMESPAN_SECONDS;
 }
 
-void
-Config::setInMemoryMode()
-{
-    MODE_USES_IN_MEMORY_LEDGER = true;
-    DATABASE = SecretValue{"sqlite3://:memory:"};
-    MODE_STORES_HISTORY_MISC = false;
-    MODE_STORES_HISTORY_LEDGERHEADERS = false;
-    MODE_ENABLES_BUCKETLIST = true;
-    BACKGROUND_EVICTION_SCAN = false;
-}
-
 bool
 Config::modeDoesCatchupWithBucketList() const
 {
     return MODE_DOES_CATCHUP && MODE_ENABLES_BUCKETLIST;
 }
 
-bool
-Config::isInMemoryMode() const
-{
-    return MODE_USES_IN_MEMORY_LEDGER;
-}
-
-bool
-Config::isUsingBucketListDB() const
-{
-    return !DEPRECATED_SQL_LEDGER_STATE && !MODE_USES_IN_MEMORY_LEDGER &&
-           MODE_ENABLES_BUCKETLIST;
-}
-
-bool
-Config::isUsingBackgroundEviction() const
-{
-    return isUsingBucketListDB() && BACKGROUND_EVICTION_SCAN;
-}
-
-bool
-Config::isPersistingBucketListDBIndexes() const
-{
-    return isUsingBucketListDB() && BUCKETLIST_DB_PERSIST_INDEX;
-}
-
-bool
-Config::isInMemoryModeWithoutMinimalDB() const
-{
-    return MODE_USES_IN_MEMORY_LEDGER && !MODE_STORES_HISTORY_LEDGERHEADERS;
-}
-
 bool
 Config::modeStoresAllHistory() const
 {
diff --git a/src/main/Config.h b/src/main/Config.h
index 600a349259..3e1020caf9 100644
--- a/src/main/Config.h
+++ b/src/main/Config.h
@@ -143,31 +143,24 @@ class Config : public std::enable_shared_from_this<Config>
     //    via applying valid TXs or manually adding entries to the BucketList.
     //    BucketList state is not preserved over restarts. If this mode can be
     //    used, it should be.
-    // 2. TESTDB_IN_MEMORY_NO_OFFERS: allows arbitrary ledger state writes via
-    //    ltx root commits, but does not test the offers table. Suitable for
+    // 2. TESTDB_IN_MEMORY: allows arbitrary ledger state writes via
+    //    ltx root commits. Suitable for
     //    tests that require writes to the ledger state that cannot be achieved
     //    via valid TX application, such as testing invalid TX error codes or
     //    low level op testing.
-    // 3. TESTDB_IN_MEMORY_OFFERS: The same as TESTDB_IN_MEMORY_NO_OFFERS, but
-    //    tests the offers table. Suitable for testing ops that interact with
-    //    offers.
-    // 4. TESTDB_ON_DISK_SQLITE: Should only be used to test SQLITE specific
+    // 3. TESTDB_POSTGRESQL: Should only be used to test POSTGRESQL specific
     //    database operations.
-    // 5. TESTDB_POSTGRESQL: Should only be used to test POSTGRESQL specific
-    //    database operations.
-    // 6. TESTDB_BUCKET_DB_PERSISTENT: Same as TESTDB_BUCKET_DB_VOLATILE, but
-    //    persists the BucketList over restart. This mode is very slow and
-    //    should only be used for testing restart behavior or some low level
-    //    BucketList features.
+    // 4. TESTDB_BUCKET_DB_PERSISTENT: Same as TESTDB_BUCKET_DB_VOLATILE, but
+    //    persists the BucketList and SQL DB over restart. This mode is very
+    //    slow and should only be used for testing restart behavior, low
+    //    level BucketList features, or SQLite-specific DB behavior.
     enum TestDbMode
     {
         TESTDB_DEFAULT,
-        TESTDB_IN_MEMORY_OFFERS,
-        TESTDB_ON_DISK_SQLITE,
+        TESTDB_IN_MEMORY,
 #ifdef USE_POSTGRES
         TESTDB_POSTGRESQL,
 #endif
-        TESTDB_IN_MEMORY_NO_OFFERS,
         TESTDB_BUCKET_DB_VOLATILE,
         TESTDB_BUCKET_DB_PERSISTENT,
         TESTDB_MODES
@@ -235,9 +228,9 @@ class Config : public std::enable_shared_from_this<Config>
     bool ARTIFICIALLY_PESSIMIZE_MERGES_FOR_TESTING;
 
     // A config parameter that avoids counting level 0 merge events and those
-    // within Bucket::fresh; this option exists only for calculating adjustments
-    // to the expected count of merges when stopping and resuming merges,
-    // and should be false in all normal cases.
+    // within LiveBucket::fresh; this option exists only for calculating
+    // adjustments to the expected count of merges when stopping and resuming
+    // merges, and should be false in all normal cases.
     bool ARTIFICIALLY_REDUCE_MERGE_COUNTS_FOR_TESTING;
 
     // A config parameter that skips adjustment of target outbound connections
@@ -377,20 +370,10 @@ class Config : public std::enable_shared_from_this<Config>
     // be set to `false` only for testing purposes.
     bool MODE_ENABLES_BUCKETLIST;
 
-    // A config parameter that uses a never-committing ledger. This means that
-    // all ledger entries will be kept in memory, and not persisted to DB
-    // (relevant tables won't even be created). This should not be set for
-    // production validators.
-    bool MODE_USES_IN_MEMORY_LEDGER;
-
     // A config parameter that can be set to true (in a captive-core
     // configuration) to delay emitting metadata by one ledger.
     bool EXPERIMENTAL_PRECAUTION_DELAY_META;
 
-    // A config parameter that when set uses SQL as the primary
-    // key-value store for LedgerEntry lookups instead of BucketListDB.
-    bool DEPRECATED_SQL_LEDGER_STATE;
-
     // Page size exponent used by BucketIndex when indexing ranges of
     // BucketEntry's. If set to 0, BucketEntry's are individually indexed.
     // Otherwise, pageSize ==
@@ -415,10 +398,6 @@ class Config : public std::enable_shared_from_this<Config>
     // persisted.
     bool BUCKETLIST_DB_PERSIST_INDEX;
 
-    // When set to true, eviction scans occur on the background thread,
-    // increasing performance. Requires EXPERIMENTAL_BUCKETLIST_DB.
-    bool BACKGROUND_EVICTION_SCAN;
-
     // A config parameter that stores historical data, such as transactions,
     // fees, and scp history in the database
     bool MODE_STORES_HISTORY_MISC;
@@ -698,6 +677,11 @@ class Config : public std::enable_shared_from_this<Config>
     // doing a graceful shutdown
     bool TEST_CASES_ENABLED;
 
+    // A config parameter that uses a never-committing ledger. This means that
+    // all ledger entries will be kept in memory, and not persisted to DB.
+    // Should only be used for testing.
+    bool MODE_USES_IN_MEMORY_LEDGER;
+
     // Set QUORUM_SET using automatic quorum set configuration based on
     // `validators`.
     void
@@ -730,10 +714,7 @@ class Config : public std::enable_shared_from_this<Config>
 
     std::chrono::seconds getExpectedLedgerCloseTime() const;
 
-    void setInMemoryMode();
     bool modeDoesCatchupWithBucketList() const;
-    bool isInMemoryMode() const;
-    bool isInMemoryModeWithoutMinimalDB() const;
     bool isUsingBucketListDB() const;
     bool isUsingBackgroundEviction() const;
     bool isPersistingBucketListDBIndexes() const;
diff --git a/src/main/PersistentState.cpp b/src/main/PersistentState.cpp
index 150d3f62ab..ecb7c12eaa 100644
--- a/src/main/PersistentState.cpp
+++ b/src/main/PersistentState.cpp
@@ -149,32 +149,24 @@ PersistentState::setSCPStateV1ForSlot(
 }
 
 bool
-PersistentState::shouldRebuildForType(LedgerEntryType let)
+PersistentState::shouldRebuildForOfferTable()
 {
     ZoneScoped;
-    return !getFromDb(getStoreStateName(kRebuildLedger, let)).empty();
+    return !getFromDb(getStoreStateName(kRebuildLedger, OFFER)).empty();
 }
 
 void
-PersistentState::clearRebuildForType(LedgerEntryType let)
+PersistentState::clearRebuildForOfferTable()
 {
     ZoneScoped;
-    updateDb(getStoreStateName(kRebuildLedger, let), "");
+    updateDb(getStoreStateName(kRebuildLedger, OFFER), "");
 }
 
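+// With BucketListDB as the only ledger-state store, offers are the only
+// entries still mirrored in a SQL table, so rebuild tracking is offer-specific.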
 void
-PersistentState::setRebuildForType(LedgerEntryType let)
+PersistentState::setRebuildForOfferTable()
 {
     ZoneScoped;
-
-    // Only allow rebuilds for offer table if BucketListDB enabled, other tables
-    // don't exist
-    if (mApp.getConfig().isUsingBucketListDB() && let != OFFER)
-    {
-        return;
-    }
-
-    updateDb(getStoreStateName(kRebuildLedger, let), "1");
+    updateDb(getStoreStateName(kRebuildLedger, OFFER), "1");
 }
 
 void
diff --git a/src/main/PersistentState.h b/src/main/PersistentState.h
index c22cd59e57..7dc359ae2e 100644
--- a/src/main/PersistentState.h
+++ b/src/main/PersistentState.h
@@ -46,9 +46,9 @@ class PersistentState
     setSCPStateV1ForSlot(uint64 slot, std::string const& value,
                          std::unordered_map<Hash, std::string> const& txSets);
 
-    bool shouldRebuildForType(LedgerEntryType let);
-    void clearRebuildForType(LedgerEntryType let);
-    void setRebuildForType(LedgerEntryType let);
+    bool shouldRebuildForOfferTable();
+    void clearRebuildForOfferTable();
+    void setRebuildForOfferTable();
 
     bool hasTxSet(Hash const& txSetHash);
     void deleteTxSets(std::unordered_set<Hash> hashesToDelete);
diff --git a/src/main/QueryServer.cpp b/src/main/QueryServer.cpp
index 97657105a1..95f1d80a44 100644
--- a/src/main/QueryServer.cpp
+++ b/src/main/QueryServer.cpp
@@ -66,8 +66,8 @@ QueryServer::QueryServer(const std::string& address, unsigned short port,
     auto workerPids = mServer.start();
     for (auto pid : workerPids)
     {
-        mBucketListSnapshots[pid] =
-            std::move(bucketSnapshotManager.copySearchableBucketListSnapshot());
+        mBucketListSnapshots[pid] = std::move(
+            bucketSnapshotManager.copySearchableLiveBucketListSnapshot());
     }
 }
 
@@ -149,16 +149,17 @@ QueryServer::getLedgerEntryRaw(std::string const& params,
         {
             root["ledgerSeq"] = *snapshotLedger;
 
-            bool snapshotExists;
-            std::tie(loadedKeys, snapshotExists) =
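+            // loadKeysFromLedger now returns an optional; an empty optional
+            // means no snapshot exists for the requested ledgerSeq.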
+            auto loadedKeysOp =
                 bl.loadKeysFromLedger(orderedKeys, *snapshotLedger);
 
             // Return 404 if ledgerSeq not found
-            if (!snapshotExists)
+            if (!loadedKeysOp)
             {
                 retStr = "LedgerSeq not found";
                 return false;
             }
+
+            loadedKeys = std::move(*loadedKeysOp);
         }
         // Otherwise default to current ledger
         else
diff --git a/src/main/QueryServer.h b/src/main/QueryServer.h
index f16a79c945..53ee434087 100644
--- a/src/main/QueryServer.h
+++ b/src/main/QueryServer.h
@@ -14,7 +14,7 @@
 
 namespace stellar
 {
-class SearchableBucketListSnapshot;
+class SearchableLiveBucketListSnapshot;
 class BucketSnapshotManager;
 
 class QueryServer
@@ -26,7 +26,7 @@ class QueryServer
     httpThreaded::server::server mServer;
 
     std::unordered_map<std::thread::id,
-                       std::shared_ptr<SearchableBucketListSnapshot>>
+                       std::shared_ptr<SearchableLiveBucketListSnapshot>>
         mBucketListSnapshots;
 
     bool safeRouter(HandlerRoute route, std::string const& params,
diff --git a/src/main/test/ApplicationUtilsTests.cpp b/src/main/test/ApplicationUtilsTests.cpp
index bc3e1e8451..31ee9dbba9 100644
--- a/src/main/test/ApplicationUtilsTests.cpp
+++ b/src/main/test/ApplicationUtilsTests.cpp
@@ -2,11 +2,13 @@
 // under the Apache License, Version 2.0. See the COPYING file at the root
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
 
+#include "bucket/test/BucketTestUtils.h"
 #include "crypto/Random.h"
 #include "history/HistoryArchiveManager.h"
 #include "history/test/HistoryTestsUtils.h"
 #include "invariant/BucketListIsConsistentWithDatabase.h"
 #include "ledger/LedgerTxn.h"
+#include "ledger/test/LedgerTestUtils.h"
 #include "lib/catch.hpp"
 #include "main/Application.h"
 #include "main/ApplicationUtils.h"
@@ -51,45 +53,6 @@ class TemporaryFileDamager
     }
 };
 
-class TemporarySQLiteDBDamager : public TemporaryFileDamager
-{
-    Config mConfig;
-    static std::filesystem::path
-    getSQLiteDBPath(Config const& cfg)
-    {
-        auto str = cfg.DATABASE.value;
-        std::string prefix = "sqlite3://";
-        REQUIRE(str.find(prefix) == 0);
-        str = str.substr(prefix.size());
-        REQUIRE(!str.empty());
-        std::filesystem::path path(str);
-        REQUIRE(std::filesystem::exists(path));
-        return path;
-    }
-
-  public:
-    TemporarySQLiteDBDamager(Config const& cfg)
-        : TemporaryFileDamager(getSQLiteDBPath(cfg)), mConfig(cfg)
-    {
-    }
-    void
-    damageVictim() override
-    {
-        // Damage a database by bumping the root account's last-modified.
-        VirtualClock clock;
-        auto app = createTestApplication(clock, mConfig, /*newDB=*/false);
-        LedgerTxn ltx(app->getLedgerTxnRoot(),
-                      /*shouldUpdateLastModified=*/false);
-        {
-            auto rootKey = accountKey(
-                stellar::txtest::getRoot(app->getNetworkID()).getPublicKey());
-            auto rootLe = ltx.load(rootKey);
-            rootLe.current().lastModifiedLedgerSeq += 1;
-        }
-        ltx.commit();
-    }
-};
-
 // Logic to check the state of the bucket list with the state of the DB
 static bool
 checkState(Application& app)
@@ -107,7 +70,7 @@ checkState(Application& app)
         blcOk = false;
     }
 
-    if (app.getConfig().isUsingBucketListDB())
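+    // Bucket indexes exist in every mode except the test-only in-memory
+    // ledger, so verify them whenever that mode is off.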
+    if (!app.getConfig().MODE_USES_IN_MEMORY_LEDGER)
     {
         auto checkBucket = [&blcOk](auto b) {
             if (!b->isEmpty() && !b->isIndexed())
@@ -120,16 +83,17 @@ checkState(Application& app)
         };
 
         auto& bm = app.getBucketManager();
-        for (uint32_t i = 0; i < bm.getBucketList().kNumLevels && blcOk; ++i)
+        for (uint32_t i = 0; i < bm.getLiveBucketList().kNumLevels && blcOk;
+             ++i)
         {
-            auto& level = bm.getBucketList().getLevel(i);
+            auto& level = bm.getLiveBucketList().getLevel(i);
             checkBucket(level.getCurr());
             checkBucket(level.getSnap());
             auto& nextFuture = level.getNext();
             if (nextFuture.hasOutputHash())
             {
                 auto hash = hexToBin256(nextFuture.getOutputHash());
-                checkBucket(bm.getBucketByHash(hash));
+                checkBucket(bm.getLiveBucketByHash(hash));
             }
         }
     }
@@ -309,82 +273,6 @@ class SimulationHelper
     {
         mSimulation->removeNode(mTestNodeID);
     }
-
-    void
-    runStartupTest(bool triggerCatchup, uint32_t startFromLedger,
-                   std::string startFromHash, uint32_t lclLedgerSeq)
-    {
-        bool isInMemoryMode = startFromLedger != 0 && !startFromHash.empty();
-        if (isInMemoryMode)
-        {
-            REQUIRE(canRebuildInMemoryLedgerFromBuckets(startFromLedger,
-                                                        lclLedgerSeq));
-        }
-
-        uint32_t checkpointFrequency = 8;
-
-        // Depending on how many ledgers we buffer during bucket
-        // apply, core might trim some and only keep checkpoint
-        // ledgers. In this case, after bucket application, normal
-        // catchup will be triggered.
-        uint32_t delayBuckets = triggerCatchup ? (2 * checkpointFrequency)
-                                               : (checkpointFrequency / 2);
-        mTestCfg.ARTIFICIALLY_DELAY_BUCKET_APPLICATION_FOR_TESTING =
-            std::chrono::seconds(delayBuckets);
-
-        // Start test app
-        auto app = mSimulation->addNode(mTestNodeSecretKey, mQuorum, &mTestCfg,
-                                        false, startFromLedger, startFromHash);
-        mSimulation->addPendingConnection(mMainNodeID, mTestNodeID);
-        REQUIRE(app);
-        mSimulation->startAllNodes();
-
-        // Ensure nodes are connected
-        if (!app->getConfig().MODE_AUTO_STARTS_OVERLAY)
-        {
-            app->getOverlayManager().start();
-        }
-
-        if (isInMemoryMode)
-        {
-            REQUIRE(app->getLedgerManager().getState() ==
-                    LedgerManager::LM_CATCHING_UP_STATE);
-        }
-
-        auto downloaded =
-            app->getCatchupManager().getCatchupMetrics().mCheckpointsDownloaded;
-
-        Upgrades::UpgradeParameters scheduledUpgrades;
-        scheduledUpgrades.mUpgradeTime =
-            VirtualClock::from_time_t(mMainNode->getLedgerManager()
-                                          .getLastClosedLedgerHeader()
-                                          .header.scpValue.closeTime);
-        scheduledUpgrades.mProtocolVersion =
-            static_cast<uint32_t>(SOROBAN_PROTOCOL_VERSION);
-        mMainNode->getHerder().setUpgrades(scheduledUpgrades);
-
-        generateLoad(false);
-        generateLoad(true);
-
-        // State has been rebuilt and node is properly in sync
-        REQUIRE(checkState(*app));
-        REQUIRE(app->getLedgerManager().getLastClosedLedgerNum() ==
-                getMainNodeLCL().header.ledgerSeq);
-        REQUIRE(app->getLedgerManager().isSynced());
-
-        if (triggerCatchup)
-        {
-            REQUIRE(downloaded < app->getCatchupManager()
-                                     .getCatchupMetrics()
-                                     .mCheckpointsDownloaded);
-        }
-        else
-        {
-            REQUIRE(downloaded == app->getCatchupManager()
-                                      .getCatchupMetrics()
-                                      .mCheckpointsDownloaded);
-        }
-    }
 };
 
 TEST_CASE("verify checkpoints command - wait condition", "[applicationutils]")
@@ -401,7 +289,7 @@ TEST_CASE("verify checkpoints command - wait condition", "[applicationutils]")
     qSet.validators.push_back(vNode1NodeID);
 
     Config cfg1 = getTestConfig(1);
-    Config cfg2 = getTestConfig(2, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg2 = getTestConfig(2, Config::TESTDB_IN_MEMORY);
     cfg2.FORCE_SCP = false;
     cfg2.NODE_IS_VALIDATOR = false;
     cfg2.MODE_DOES_CATCHUP = false;
@@ -448,12 +336,12 @@ TEST_CASE("offline self-check works", "[applicationutils][selfcheck]")
         // Step 2: make a new application and catch it up part-way to the
         // archives (but behind).
         auto app = catchupSimulation.createCatchupApplication(
-            std::numeric_limits<uint32_t>::max(), Config::TESTDB_ON_DISK_SQLITE,
-            "client");
+            std::numeric_limits<uint32_t>::max(),
+            Config::TESTDB_BUCKET_DB_PERSISTENT, "client");
         catchupSimulation.catchupOffline(app, l1);
         chkConfig = app->getConfig();
         victimBucketPath = app->getBucketManager()
-                               .getBucketList()
+                               .getLiveBucketList()
                                .getLevel(0)
                                .getCurr()
                                ->getFilename();
@@ -490,146 +378,14 @@ TEST_CASE("offline self-check works", "[applicationutils][selfcheck]")
         damage.damageVictim();
         REQUIRE(selfCheck(chkConfig) == 1);
     }
-    {
-        // Damage the SQL ledger.
-        TemporarySQLiteDBDamager damage(chkConfig);
-        damage.damageVictim();
-        REQUIRE(selfCheck(chkConfig) == 1);
-    }
 }
 
 TEST_CASE("application setup", "[applicationutils]")
 {
     VirtualClock clock;
-
-    SECTION("SQL DB mode")
-    {
-        auto cfg = getTestConfig();
-        auto app = setupApp(cfg, clock, 0, "");
-        REQUIRE(checkState(*app));
-    }
-
-    auto testInMemoryMode = [&](Config& cfg1, Config& cfg2) {
-        // Publish a few checkpoints then shut down test node
-        auto simulation = SimulationHelper(cfg1, cfg2);
-        auto [startFromLedger, startFromHash] =
-            simulation.publishCheckpoints(2);
-        auto lcl = simulation.getTestNodeLCL();
-        simulation.shutdownTestNode();
-
-        SECTION("minimal DB setup")
-        {
-            SECTION("not found")
-            {
-                // Remove `buckets` dir completely
-                fs::deltree(cfg2.BUCKET_DIR_PATH);
-
-                // Initialize new minimal DB from scratch
-                auto app = setupApp(cfg2, clock, 0, "");
-                REQUIRE(app);
-                REQUIRE(checkState(*app));
-            }
-            SECTION("found")
-            {
-                // Found existing minimal DB, reset to genesis
-                auto app = setupApp(cfg2, clock, 0, "");
-                REQUIRE(app);
-                REQUIRE(checkState(*app));
-            }
-        }
-        SECTION("rebuild state")
-        {
-            SECTION("from buckets")
-            {
-                auto selectedLedger = lcl.header.ledgerSeq;
-                auto selectedHash = binToHex(lcl.hash);
-
-                SECTION("replay buffered ledgers")
-                {
-                    simulation.runStartupTest(false, selectedLedger,
-                                              selectedHash,
-                                              lcl.header.ledgerSeq);
-                }
-                SECTION("trigger catchup")
-                {
-                    simulation.runStartupTest(true, selectedLedger,
-                                              selectedHash,
-                                              lcl.header.ledgerSeq);
-                }
-                SECTION("start from future ledger")
-                {
-                    // Validator publishes more checkpoints while the
-                    // captive-core instance is shutdown
-                    auto [selectedLedger2, selectedHash2] =
-                        simulation.publishCheckpoints(4);
-                    simulation.runStartupTest(true, selectedLedger2,
-                                              selectedHash2,
-                                              lcl.header.ledgerSeq);
-                }
-            }
-            SECTION("via catchup")
-            {
-                // startAtLedger is behind LCL, reset to genesis and catchup
-                REQUIRE(!canRebuildInMemoryLedgerFromBuckets(
-                    startFromLedger, lcl.header.ledgerSeq));
-                auto app =
-                    setupApp(cfg2, clock, startFromLedger, startFromHash);
-                REQUIRE(app);
-                REQUIRE(checkState(*app));
-                REQUIRE(app->getLedgerManager().getLastClosedLedgerNum() ==
-                        startFromLedger);
-                REQUIRE(app->getLedgerManager().getState() ==
-                        LedgerManager::LM_CATCHING_UP_STATE);
-            }
-
-            SECTION("bad hash")
-            {
-                // Create mismatch between start-from ledger and hash
-                auto app =
-                    setupApp(cfg2, clock, startFromLedger + 1, startFromHash);
-                REQUIRE(!app);
-            }
-        }
-        SECTION("set meta stream")
-        {
-            TmpDirManager tdm(std::string("streamtmp-") +
-                              binToHex(randomBytes(8)));
-            TmpDir td = tdm.tmpDir("streams");
-            std::string path = td.getName() + "/stream.xdr";
-
-            // Remove `buckets` dir completely to ensure multiple apps are
-            // initialized during setup
-            fs::deltree(cfg2.BUCKET_DIR_PATH);
-            SECTION("file path")
-            {
-                cfg2.METADATA_OUTPUT_STREAM = path;
-
-                auto app = setupApp(cfg2, clock, 0, "");
-                REQUIRE(app);
-                REQUIRE(checkState(*app));
-            }
-#ifdef _WIN32
-#else
-            SECTION("fd")
-            {
-                int fd = ::open(path.c_str(), O_CREAT | O_WRONLY, 0644);
-                REQUIRE(fd != -1);
-                cfg2.METADATA_OUTPUT_STREAM = fmt::format("fd:{}", fd);
-
-                auto app = setupApp(cfg2, clock, 0, "");
-                REQUIRE(app);
-                REQUIRE(checkState(*app));
-            }
-#endif
-        }
-    };
-    SECTION("in memory mode")
-    {
-        Config cfg1 = getTestConfig(1);
-        Config cfg2 = getTestConfig(2, Config::TESTDB_IN_MEMORY_NO_OFFERS);
-        cfg2.DATABASE = SecretValue{minimalDBForInMemoryMode(cfg2)};
-        testInMemoryMode(cfg1, cfg2);
-    }
+    auto cfg = getTestConfig();
+    auto app = setupApp(cfg, clock);
+    REQUIRE(checkState(*app));
 }
 
 TEST_CASE("application major version numbers", "[applicationutils]")
diff --git a/src/main/test/ConfigTests.cpp b/src/main/test/ConfigTests.cpp
index 29938731a9..5c5d8c8e36 100644
--- a/src/main/test/ConfigTests.cpp
+++ b/src/main/test/ConfigTests.cpp
@@ -286,7 +286,6 @@ TEST_CASE("bad validators configs", "[config]")
 NODE_SEED="SA7FGJMMUIHNE3ZPI2UO5I632A7O5FBAZTXFAIEVFA4DSSGLHXACLAIT a3"
 {NODE_HOME_DOMAIN}
 NODE_IS_VALIDATOR=true
-DEPRECATED_SQL_LEDGER_STATE=true
 
 ############################
 # list of HOME_DOMAINS
@@ -473,9 +472,7 @@ TEST_CASE("nesting level", "[config]")
         auto secretKey = SecretKey::fromSeed(hash);
         return secretKey.getStrKeyPublic();
     };
-    std::string configNesting =
-        "DEPRECATED_SQL_LEDGER_STATE=true\n" // Required for all configs
-        "UNSAFE_QUORUM=true";
+    std::string configNesting = "UNSAFE_QUORUM=true";
     std::string quorumSetNumber = "";
     std::string quorumSetTemplate = R"(
 
@@ -536,7 +533,6 @@ TEST_CASE("operation filter configuration", "[config]")
         };
 
         std::stringstream ss;
-        ss << "DEPRECATED_SQL_LEDGER_STATE=true\n"; // required for all configs
         ss << "UNSAFE_QUORUM=true\n";
         toConfigStr(vals, ss);
         ss << "\n[QUORUM_SET]\n";
diff --git a/src/main/test/ExternalQueueTests.cpp b/src/main/test/ExternalQueueTests.cpp
deleted file mode 100644
index c44713ea7f..0000000000
--- a/src/main/test/ExternalQueueTests.cpp
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2014 Stellar Development Foundation and contributors. Licensed
-// under the Apache License, Version 2.0. See the COPYING file at the root
-// of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
-
-#include "lib/catch.hpp"
-#include "main/Application.h"
-#include "main/CommandHandler.h"
-#include "main/Config.h"
-#include "main/ExternalQueue.h"
-#include "simulation/Simulation.h"
-#include "test/TestUtils.h"
-#include "test/test.h"
-
-using namespace stellar;
-
-TEST_CASE("cursors", "[externalqueue]")
-{
-    VirtualClock clock;
-    Config const& cfg = getTestConfig(0, Config::TESTDB_ON_DISK_SQLITE);
-    Application::pointer app = createTestApplication(clock, cfg);
-
-    ExternalQueue ps(*app);
-    std::map<std::string, uint32> curMap;
-    app->getCommandHandler().manualCmd("setcursor?id=FOO&cursor=123");
-    app->getCommandHandler().manualCmd("setcursor?id=BAR&cursor=456");
-
-    SECTION("get non-existent cursor")
-    {
-        ps.getCursorForResource("NONEXISTENT", curMap);
-        REQUIRE(curMap.size() == 0);
-    }
-
-    SECTION("get single cursor")
-    {
-        ps.getCursorForResource("FOO", curMap);
-        REQUIRE(curMap.size() == 1);
-    }
-
-    SECTION("get all cursors")
-    {
-        ps.getCursorForResource("", curMap);
-        REQUIRE(curMap.size() == 2);
-    }
-}
diff --git a/src/overlay/test/FloodTests.cpp b/src/overlay/test/FloodTests.cpp
index e224530931..e2f9b50ac1 100644
--- a/src/overlay/test/FloodTests.cpp
+++ b/src/overlay/test/FloodTests.cpp
@@ -73,9 +73,8 @@ TEST_CASE("Flooding", "[flood][overlay][acceptance]")
                     auto const& header = n->getLedgerManager()
                                              .getLastClosedLedgerHeader()
                                              .header;
-                    BucketTestUtils::addBatchAndUpdateSnapshot(
-                        n->getBucketManager().getBucketList(), *n, header, {},
-                        {gen}, {});
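+                    // The helper now takes the Application and resolves the
+                    // live BucketList itself.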
+                    BucketTestUtils::addLiveBatchAndUpdateSnapshot(
+                        *n, header, {}, {gen}, {});
                 }
             }
         }
diff --git a/src/overlay/test/OverlayTests.cpp b/src/overlay/test/OverlayTests.cpp
index 7d29ae7c66..ff4cf25554 100644
--- a/src/overlay/test/OverlayTests.cpp
+++ b/src/overlay/test/OverlayTests.cpp
@@ -140,8 +140,8 @@ TEST_CASE("flow control byte capacity", "[overlay][flowcontrol]")
 {
     VirtualClock clock;
 
-    auto cfg1 = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
-    auto cfg2 = getTestConfig(1, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    auto cfg1 = getTestConfig(0, Config::TESTDB_IN_MEMORY);
+    auto cfg2 = getTestConfig(1, Config::TESTDB_IN_MEMORY);
     REQUIRE(cfg1.PEER_FLOOD_READING_CAPACITY !=
             cfg1.PEER_FLOOD_READING_CAPACITY_BYTES);
 
diff --git a/src/simulation/CoreTests.cpp b/src/simulation/CoreTests.cpp
index 30f1bffd55..423b77e211 100644
--- a/src/simulation/CoreTests.cpp
+++ b/src/simulation/CoreTests.cpp
@@ -686,9 +686,8 @@ TEST_CASE("Bucket list entries vs write throughput", "[scalability][!hide]")
         LedgerHeader lh;
         lh.ledgerVersion = Config::CURRENT_LEDGER_PROTOCOL_VERSION;
         lh.ledgerSeq = i;
-        BucketTestUtils::addBatchAndUpdateSnapshot(
-            app->getBucketManager().getBucketList(), *app, lh,
-            LedgerTestUtils::generateValidLedgerEntries(100),
+        BucketTestUtils::addLiveBatchAndUpdateSnapshot(
+            *app, lh, LedgerTestUtils::generateValidLedgerEntries(100),
             LedgerTestUtils::generateValidLedgerEntries(20),
             LedgerTestUtils::generateValidLedgerEntryKeysWithExclusions(
                 {CONFIG_SETTING}, 5));
diff --git a/src/simulation/Simulation.cpp b/src/simulation/Simulation.cpp
index 0818922f6a..61413b94ed 100644
--- a/src/simulation/Simulation.cpp
+++ b/src/simulation/Simulation.cpp
@@ -91,8 +91,7 @@ Simulation::setCurrentVirtualTime(VirtualClock::system_time_point t)
 
 Application::pointer
 Simulation::addNode(SecretKey nodeKey, SCPQuorumSet qSet, Config const* cfg2,
-                    bool newDB, uint32_t startAtLedger,
-                    std::string const& startAtHash)
+                    bool newDB)
 {
     auto cfg = cfg2 ? std::make_shared<Config>(*cfg2)
                     : std::make_shared<Config>(newConfig());
@@ -140,7 +139,7 @@ Simulation::addNode(SecretKey nodeKey, SCPQuorumSet qSet, Config const* cfg2,
     }
     else
     {
-        app = setupApp(*cfg, *clock, startAtLedger, startAtHash);
+        app = setupApp(*cfg, *clock);
     }
     mNodes.emplace(nodeKey.getPublicKey(), Node{clock, app});
 
diff --git a/src/simulation/Simulation.h b/src/simulation/Simulation.h
index 8743af37f2..e1385f374d 100644
--- a/src/simulation/Simulation.h
+++ b/src/simulation/Simulation.h
@@ -50,9 +50,8 @@ class Simulation
     // Add new node to the simulation. This function does not start the node.
     // Callers are expected to call `start` or `startAllNodes` manually.
     Application::pointer addNode(SecretKey nodeKey, SCPQuorumSet qSet,
-                                 Config const* cfg = nullptr, bool newDB = true,
-                                 uint32_t startAtLedger = 0,
-                                 std::string const& startAtHash = "");
+                                 Config const* cfg = nullptr,
+                                 bool newDB = true);
     Application::pointer getNode(NodeID nodeID);
     std::vector<Application::pointer> getNodes();
     std::vector<NodeID> getNodeIDs();
diff --git a/src/simulation/test/LoadGeneratorTests.cpp b/src/simulation/test/LoadGeneratorTests.cpp
index fd31de9b05..c2b6a6bf85 100644
--- a/src/simulation/test/LoadGeneratorTests.cpp
+++ b/src/simulation/test/LoadGeneratorTests.cpp
@@ -24,7 +24,6 @@ TEST_CASE("generate load in protocol 1")
             auto cfg = getTestConfig(i);
             cfg.TESTING_UPGRADE_MAX_TX_SET_SIZE = 5000;
             cfg.TESTING_UPGRADE_LEDGER_PROTOCOL_VERSION = 1;
-            cfg.DEPRECATED_SQL_LEDGER_STATE = false;
             return cfg;
         });
 
diff --git a/src/test/FuzzerImpl.cpp b/src/test/FuzzerImpl.cpp
index ea19953621..06f24a5844 100644
--- a/src/test/FuzzerImpl.cpp
+++ b/src/test/FuzzerImpl.cpp
@@ -864,7 +864,6 @@ getFuzzConfig(int instanceNumber)
     Config cfg = getTestConfig(instanceNumber);
     cfg.MANUAL_CLOSE = true;
     cfg.CATCHUP_COMPLETE = false;
-    cfg.BACKGROUND_EVICTION_SCAN = false;
     cfg.CATCHUP_RECENT = 0;
     cfg.ARTIFICIALLY_GENERATE_LOAD_FOR_TESTING = false;
     cfg.ARTIFICIALLY_SET_CLOSE_TIME_FOR_TESTING = UINT32_MAX;
diff --git a/src/test/TestUtils.cpp b/src/test/TestUtils.cpp
index 7750fe345a..c4fb9886fb 100644
--- a/src/test/TestUtils.cpp
+++ b/src/test/TestUtils.cpp
@@ -3,6 +3,7 @@
 // of this distribution or at http://www.apache.org/licenses/LICENSE-2.0
 
 #include "TestUtils.h"
+#include "bucket/BucketList.h"
 #include "overlay/test/LoopbackPeer.h"
 #include "simulation/LoadGenerator.h"
 #include "simulation/Simulation.h"
@@ -128,16 +129,21 @@ computeMultiplier(LedgerEntry const& le)
     }
 }
 
-BucketListDepthModifier::BucketListDepthModifier(uint32_t newDepth)
-    : mPrevDepth(BucketList::kNumLevels)
+template <class BucketT>
+BucketListDepthModifier<BucketT>::BucketListDepthModifier(uint32_t newDepth)
+    : mPrevDepth(BucketListBase<BucketT>::kNumLevels)
 {
-    BucketList::kNumLevels = newDepth;
+    BucketListBase<BucketT>::kNumLevels = newDepth;
 }
 
-BucketListDepthModifier::~BucketListDepthModifier()
+template <class BucketT>
+BucketListDepthModifier<BucketT>::~BucketListDepthModifier()
 {
-    BucketList::kNumLevels = mPrevDepth;
+    BucketListBase<BucketT>::kNumLevels = mPrevDepth;
 }
+
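+// Only the live and hot-archive bucket lists are supported, as enforced by
+// the static_assert in the class template.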
+template class BucketListDepthModifier<LiveBucket>;
+template class BucketListDepthModifier<HotArchiveBucket>;
 }
 
 TestInvariantManager::TestInvariantManager(medida::MetricsRegistry& registry)
@@ -285,7 +291,7 @@ modifySorobanNetworkConfig(Application& app,
 
     // Need to close a ledger following call to `addBatch` from config upgrade
     // to refresh cached state
-    if (app.getConfig().isUsingBucketListDB())
+    if (!app.getConfig().MODE_USES_IN_MEMORY_LEDGER)
     {
         txtest::closeLedger(app);
     }
diff --git a/src/test/TestUtils.h b/src/test/TestUtils.h
index 83f3e6d4f9..21b3882e6c 100644
--- a/src/test/TestUtils.h
+++ b/src/test/TestUtils.h
@@ -32,8 +32,11 @@ std::vector<Asset> getInvalidAssets(SecretKey const& issuer);
 
 int32_t computeMultiplier(LedgerEntry const& le);
 
-class BucketListDepthModifier
+template <class BucketT> class BucketListDepthModifier
 {
+    static_assert(std::is_same_v<BucketT, LiveBucket> ||
+                  std::is_same_v<BucketT, HotArchiveBucket>);
+
     uint32_t const mPrevDepth;
 
   public:
diff --git a/src/test/TxTests.cpp b/src/test/TxTests.cpp
index b306e737c1..86bf9bbcf7 100644
--- a/src/test/TxTests.cpp
+++ b/src/test/TxTests.cpp
@@ -387,9 +387,13 @@ checkTransaction(TransactionTestFrame& txFrame, Application& app)
 void
 applyTx(TransactionTestFramePtr const& tx, Application& app, bool checkSeqNum)
 {
+    if (app.getConfig().MODE_USES_IN_MEMORY_LEDGER)
+    {
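+        // The test-only in-memory ledger can apply the tx directly without
+        // closing a ledger.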
+        applyCheck(tx, app, checkSeqNum);
+    }
     // We cannot commit directly to the DB if running BucketListDB, so close a
     // ledger with the TX instead
-    if (app.getConfig().isUsingBucketListDB())
+    else
     {
         auto resultSet = closeLedger(app, {tx});
 
@@ -404,10 +408,6 @@ applyTx(TransactionTestFramePtr const& tx, Application& app, bool checkSeqNum)
         REQUIRE(meta.size() == 1);
         recordOrCheckGlobalTestTxMetadata(meta.back().getXDR());
     }
-    else
-    {
-        applyCheck(tx, app, checkSeqNum);
-    }
 
     throwIf(tx->getResult());
     checkTransaction(*tx, app);
diff --git a/src/test/test.cpp b/src/test/test.cpp
index 1227314e78..d7ea4668e5 100644
--- a/src/test/test.cpp
+++ b/src/test/test.cpp
@@ -194,10 +194,10 @@ getTestConfig(int instanceNumber, Config::TestDbMode mode)
     instanceNumber += gBaseInstance;
     if (mode == Config::TESTDB_DEFAULT)
     {
-        // by default, tests should be run with in memory SQLITE as it's faster
-        // you can change this by enabling the appropriate line below
-        // mode = Config::TESTDB_IN_MEMORY_OFFERS;
-        // mode = Config::TESTDB_ON_DISK_SQLITE;
+        // by default, tests should be run with volatile BucketList as it's
+        // faster. You can change this by enabling the appropriate line below
+        // mode = Config::TESTDB_IN_MEMORY;
+        // mode = Config::TESTDB_BUCKET_DB_PERSISTENT;
         // mode = Config::TESTDB_POSTGRESQL;
         mode = Config::TESTDB_BUCKET_DB_VOLATILE;
     }
@@ -283,11 +283,10 @@ getTestConfig(int instanceNumber, Config::TestDbMode mode)
         switch (mode)
         {
         case Config::TESTDB_BUCKET_DB_VOLATILE:
-        case Config::TESTDB_IN_MEMORY_OFFERS:
+        case Config::TESTDB_IN_MEMORY:
             dbname << "sqlite3://:memory:";
             break;
         case Config::TESTDB_BUCKET_DB_PERSISTENT:
-        case Config::TESTDB_ON_DISK_SQLITE:
             dbname << "sqlite3://" << rootDir << "test.db";
             thisConfig.DISABLE_XDR_FSYNC = false;
             break;
@@ -296,30 +295,17 @@ getTestConfig(int instanceNumber, Config::TestDbMode mode)
             dbname << "postgresql://dbname=test" << instanceNumber;
             thisConfig.DISABLE_XDR_FSYNC = false;
             break;
-        case Config::TESTDB_IN_MEMORY_NO_OFFERS:
-            thisConfig.MODE_USES_IN_MEMORY_LEDGER = true;
-            break;
 #endif
         default:
             abort();
         }
 
-        if (mode == Config::TESTDB_BUCKET_DB_VOLATILE ||
-            mode == Config::TESTDB_BUCKET_DB_PERSISTENT)
-        {
-            thisConfig.DEPRECATED_SQL_LEDGER_STATE = false;
-            thisConfig.BACKGROUND_EVICTION_SCAN = true;
-        }
-        else
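+        // BucketListDB is now the only ledger-state backend; the one special
+        // case left is the test-only in-memory ledger.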
+        if (mode == Config::TESTDB_IN_MEMORY)
         {
-            thisConfig.DEPRECATED_SQL_LEDGER_STATE = true;
-            thisConfig.BACKGROUND_EVICTION_SCAN = false;
+            thisConfig.MODE_USES_IN_MEMORY_LEDGER = true;
         }
 
-        if (mode != Config::TESTDB_IN_MEMORY_NO_OFFERS)
-        {
-            thisConfig.DATABASE = SecretValue{dbname.str()};
-        }
+        thisConfig.DATABASE = SecretValue{dbname.str()};
 
         thisConfig.REPORT_METRICS = gTestMetrics;
         // disable maintenance
@@ -516,6 +502,13 @@ for_versions_from(std::vector<uint32> const& versions, Application& app,
     for_versions_from(versions.back() + 1, app, f);
 }
 
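+// Config-based overload: runs `f` once per protocol version from `from`
+// through CURRENT_LEDGER_PROTOCOL_VERSION with a config set to that version.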
+void
+for_versions_from(uint32 from, Config const& cfg,
+                  std::function<void(Config const&)> const& f)
+{
+    for_versions(from, Config::CURRENT_LEDGER_PROTOCOL_VERSION, cfg, f);
+}
+
 void
 for_all_versions(Application& app, std::function<void(void)> const& f)
 {
diff --git a/src/test/test.h b/src/test/test.h
index ad41a1f5f1..75a98adbaa 100644
--- a/src/test/test.h
+++ b/src/test/test.h
@@ -55,6 +55,9 @@ void for_versions_from(uint32 from, Application& app,
 void for_versions_from(std::vector<uint32> const& versions, Application& app,
                        std::function<void(void)> const& f);
 
+void for_versions_from(uint32 from, Config const& cfg,
+                       std::function<void(Config const&)> const& f);
+
 void for_all_versions(Application& app, std::function<void(void)> const& f);
 
 void for_all_versions(Config const& cfg,
diff --git a/src/transactions/TransactionSQL.cpp b/src/transactions/TransactionSQL.cpp
index ff37172218..9e87fb56a8 100644
--- a/src/transactions/TransactionSQL.cpp
+++ b/src/transactions/TransactionSQL.cpp
@@ -363,19 +363,9 @@ storeTransaction(Database& db, uint32_t ledgerSeq,
     uint32_t txIndex = static_cast<uint32_t>(resultSet.results.size());
 
     std::string sqlStr;
-    if (cfg.isUsingBucketListDB())
-    {
-        sqlStr = "INSERT INTO txhistory "
-                 "( txid, ledgerseq, txindex,  txbody, txresult) VALUES "
-                 "(:id,  :seq,      :txindex, :txb,   :txres)";
-    }
-    else
-    {
-        sqlStr =
-            "INSERT INTO txhistory "
-            "( txid, ledgerseq, txindex,  txbody, txresult, txmeta) VALUES "
-            "(:id,  :seq,      :txindex, :txb,   :txres,   :meta)";
-    }
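+    // txmeta is no longer stored in txhistory; the table now keeps only the
+    // transaction body and result.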
+    sqlStr = "INSERT INTO txhistory "
+             "( txid, ledgerseq, txindex,  txbody, txresult) VALUES "
+             "(:id,  :seq,      :txindex, :txb,   :txres)";
 
     auto prep = db.getPreparedStatement(sqlStr);
     auto& st = prep.statement();
@@ -385,11 +375,6 @@ storeTransaction(Database& db, uint32_t ledgerSeq,
     st.exchange(soci::use(txBody));
     st.exchange(soci::use(txResult));
 
-    if (!cfg.isUsingBucketListDB())
-    {
-        st.exchange(soci::use(meta));
-    }
-
     st.define_and_bind();
     {
         auto timer = db.getInsertTimer("txhistory");
@@ -581,20 +566,14 @@ dropTransactionHistory(Database& db, Config const& cfg)
 {
     ZoneScoped;
     db.getSession() << "DROP TABLE IF EXISTS txhistory";
-
-    // txmeta only supported when BucketListDB is not enabled
-    std::string txMetaColumn =
-        cfg.isUsingBucketListDB() ? "" : "txmeta      TEXT NOT NULL,";
-
     db.getSession() << "CREATE TABLE txhistory ("
                        "txid        CHARACTER(64) NOT NULL,"
                        "ledgerseq   INT NOT NULL CHECK (ledgerseq >= 0),"
                        "txindex     INT NOT NULL,"
                        "txbody      TEXT NOT NULL,"
-                       "txresult    TEXT NOT NULL," +
-                           txMetaColumn +
-                           "PRIMARY KEY (ledgerseq, txindex)"
-                           ")";
+                       "txresult    TEXT NOT NULL,"
+                       "PRIMARY KEY (ledgerseq, txindex)"
+                       ")";
 
     db.getSession() << "CREATE INDEX histbyseq ON txhistory (ledgerseq);";
 
diff --git a/src/transactions/test/AllowTrustTests.cpp b/src/transactions/test/AllowTrustTests.cpp
index 398bee5e28..43c25f3824 100644
--- a/src/transactions/test/AllowTrustTests.cpp
+++ b/src/transactions/test/AllowTrustTests.cpp
@@ -82,7 +82,7 @@ template <int V> struct TestStub
         TrustFlagOp flagOp = V == 0 ? TrustFlagOp::ALLOW_TRUST
                                     : TrustFlagOp::SET_TRUST_LINE_FLAGS;
 
-        auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
         VirtualClock clock;
         auto app = createTestApplication(clock, cfg);
@@ -377,7 +377,7 @@ template <int V> struct TestStub
         TrustFlagOp flagOp = V == 0 ? TrustFlagOp::ALLOW_TRUST
                                     : TrustFlagOp::SET_TRUST_LINE_FLAGS;
 
-        auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
         VirtualClock clock;
         auto app = createTestApplication(clock, cfg);
diff --git a/src/transactions/test/BumpSequenceTests.cpp b/src/transactions/test/BumpSequenceTests.cpp
index f8a43d42ca..9a09b171f2 100644
--- a/src/transactions/test/BumpSequenceTests.cpp
+++ b/src/transactions/test/BumpSequenceTests.cpp
@@ -25,7 +25,7 @@ using namespace stellar::txtest;
 
 TEST_CASE_VERSIONS("bump sequence", "[tx][bumpsequence]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
diff --git a/src/transactions/test/ChangeTrustTests.cpp b/src/transactions/test/ChangeTrustTests.cpp
index fcb09d4af6..e6e021264d 100644
--- a/src/transactions/test/ChangeTrustTests.cpp
+++ b/src/transactions/test/ChangeTrustTests.cpp
@@ -23,7 +23,7 @@ using namespace stellar::txtest;
 
 TEST_CASE_VERSIONS("change trust", "[tx][changetrust]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
@@ -303,7 +303,7 @@ TEST_CASE_VERSIONS("change trust", "[tx][changetrust]")
 TEST_CASE_VERSIONS("change trust pool share trustline",
                    "[tx][changetrust][liquiditypool]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
diff --git a/src/transactions/test/ClaimableBalanceTests.cpp b/src/transactions/test/ClaimableBalanceTests.cpp
index f90e37633b..afc076258a 100644
--- a/src/transactions/test/ClaimableBalanceTests.cpp
+++ b/src/transactions/test/ClaimableBalanceTests.cpp
@@ -298,7 +298,7 @@ validateBalancesOnCreateAndClaim(TestAccount& createAcc, TestAccount& claimAcc,
 
 TEST_CASE_VERSIONS("claimableBalance", "[tx][claimablebalance]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
diff --git a/src/transactions/test/ClawbackClaimableBalanceTests.cpp b/src/transactions/test/ClawbackClaimableBalanceTests.cpp
index 195dd9aee0..b43bc5a015 100644
--- a/src/transactions/test/ClawbackClaimableBalanceTests.cpp
+++ b/src/transactions/test/ClawbackClaimableBalanceTests.cpp
@@ -19,7 +19,7 @@ using namespace stellar::txtest;
 TEST_CASE_VERSIONS("clawbackClaimableBalance",
                    "[tx][clawback][claimablebalance]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
diff --git a/src/transactions/test/ClawbackTests.cpp b/src/transactions/test/ClawbackTests.cpp
index eee797d441..f0238c35f1 100644
--- a/src/transactions/test/ClawbackTests.cpp
+++ b/src/transactions/test/ClawbackTests.cpp
@@ -17,7 +17,7 @@ using namespace stellar::txtest;
 
 TEST_CASE_VERSIONS("clawback", "[tx][clawback]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
diff --git a/src/transactions/test/CreateAccountTests.cpp b/src/transactions/test/CreateAccountTests.cpp
index fb8ed2e424..aedb03989c 100644
--- a/src/transactions/test/CreateAccountTests.cpp
+++ b/src/transactions/test/CreateAccountTests.cpp
@@ -31,7 +31,7 @@ TEST_CASE_VERSIONS("create account", "[tx][createaccount]")
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     // set up world
     auto root = TestAccount::createRoot(*app);
diff --git a/src/transactions/test/EndSponsoringFutureReservesTests.cpp b/src/transactions/test/EndSponsoringFutureReservesTests.cpp
index a92b6e2281..9220ac2617 100644
--- a/src/transactions/test/EndSponsoringFutureReservesTests.cpp
+++ b/src/transactions/test/EndSponsoringFutureReservesTests.cpp
@@ -34,7 +34,7 @@ TEST_CASE_VERSIONS("confirm and clear sponsor", "[tx][sponsorship]")
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     auto root = TestAccount::createRoot(*app);
     int64_t minBalance = app->getLedgerManager().getLastMinBalance(0);
diff --git a/src/transactions/test/FeeBumpTransactionTests.cpp b/src/transactions/test/FeeBumpTransactionTests.cpp
index c645829d61..efc2ec212e 100644
--- a/src/transactions/test/FeeBumpTransactionTests.cpp
+++ b/src/transactions/test/FeeBumpTransactionTests.cpp
@@ -66,7 +66,7 @@ TEST_CASE_VERSIONS("fee bump transactions", "[tx][feebump]")
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     auto& lm = app->getLedgerManager();
     auto fee = lm.getLastClosedLedgerHeader().header.baseFee;
diff --git a/src/transactions/test/InflationTests.cpp b/src/transactions/test/InflationTests.cpp
index dbf2d8feef..f5cc0697f0 100644
--- a/src/transactions/test/InflationTests.cpp
+++ b/src/transactions/test/InflationTests.cpp
@@ -432,7 +432,7 @@ TEST_CASE_VERSIONS("inflation total coins", "[tx][inflation]")
 
 TEST_CASE_VERSIONS("inflation", "[tx][inflation]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock::system_time_point inflationStart;
     // inflation starts on 1-jul-2014
diff --git a/src/transactions/test/InvokeHostFunctionTests.cpp b/src/transactions/test/InvokeHostFunctionTests.cpp
index c7c19295fc..957c396da0 100644
--- a/src/transactions/test/InvokeHostFunctionTests.cpp
+++ b/src/transactions/test/InvokeHostFunctionTests.cpp
@@ -2553,149 +2553,113 @@ TEST_CASE("charge rent fees for storage resize", "[tx][soroban]")
 
 TEST_CASE("temp entry eviction", "[tx][soroban]")
 {
-    auto test = [](bool enableBucketListDB, bool backgroundEviction) {
-        if (backgroundEviction && !enableBucketListDB)
-        {
-            throw "testing error: backgroundEviction requires "
-                  "enableBucketListDB == true";
-        }
-
-        Config cfg = getTestConfig();
-        TmpDirManager tdm(std::string("soroban-storage-meta-") +
-                          binToHex(randomBytes(8)));
-        TmpDir td = tdm.tmpDir("soroban-meta-ok");
-        std::string metaPath = td.getName() + "/stream.xdr";
-
-        cfg.METADATA_OUTPUT_STREAM = metaPath;
-        cfg.DEPRECATED_SQL_LEDGER_STATE = !enableBucketListDB;
-        cfg.BACKGROUND_EVICTION_SCAN = backgroundEviction;
-
-        // overrideSorobanNetworkConfigForTest commits directly to the
-        // database, will not work if BucketListDB is enabled so we must use
-        // the cfg override
-        if (enableBucketListDB)
-        {
-            cfg.TESTING_SOROBAN_HIGH_LIMIT_OVERRIDE = true;
-        }
-
-        SorobanTest test(cfg);
-        ContractStorageTestClient client(test);
-        auto const& contractKeys = client.getContract().getKeys();
 
-        // Extend Wasm and instance
-        test.invokeExtendOp(contractKeys, 10'000);
+    Config cfg = getTestConfig();
+    TmpDirManager tdm(std::string("soroban-storage-meta-") +
+                      binToHex(randomBytes(8)));
+    TmpDir td = tdm.tmpDir("soroban-meta-ok");
+    std::string metaPath = td.getName() + "/stream.xdr";
 
-        auto invocation = client.getContract().prepareInvocation(
-            "put_temporary", {makeSymbolSCVal("key"), makeU64SCVal(123)},
-            client.writeKeySpec("key", ContractDataDurability::TEMPORARY));
-        REQUIRE(invocation.withExactNonRefundableResourceFee().invoke());
-        auto lk = client.getContract().getDataKey(
-            makeSymbolSCVal("key"), ContractDataDurability::TEMPORARY);
+    cfg.METADATA_OUTPUT_STREAM = metaPath;
 
-        auto expectedLiveUntilLedger =
-            test.getLCLSeq() +
-            test.getNetworkCfg().stateArchivalSettings().minTemporaryTTL - 1;
-        REQUIRE(test.getTTL(lk) == expectedLiveUntilLedger);
-        auto const evictionLedger = 4097;
+    SorobanTest test(cfg);
+    ContractStorageTestClient client(test);
+    auto const& contractKeys = client.getContract().getKeys();
+
+    // Extend Wasm and instance
+    test.invokeExtendOp(contractKeys, 10'000);
+
+    auto invocation = client.getContract().prepareInvocation(
+        "put_temporary", {makeSymbolSCVal("key"), makeU64SCVal(123)},
+        client.writeKeySpec("key", ContractDataDurability::TEMPORARY));
+    REQUIRE(invocation.withExactNonRefundableResourceFee().invoke());
+    auto lk = client.getContract().getDataKey(
+        makeSymbolSCVal("key"), ContractDataDurability::TEMPORARY);
+
+    auto expectedLiveUntilLedger =
+        test.getLCLSeq() +
+        test.getNetworkCfg().stateArchivalSettings().minTemporaryTTL - 1;
+    REQUIRE(test.getTTL(lk) == expectedLiveUntilLedger);
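+    // Ledger at which the eviction scan is expected to evict the temp entry
+    // (this depends on the test's state archival settings).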
+    auto const evictionLedger = 4097;
+
+    // Close ledgers until temp entry is evicted
+    for (uint32_t i = test.getLCLSeq(); i < evictionLedger - 2; ++i)
+    {
+        closeLedgerOn(test.getApp(), i, 2, 1, 2016);
+    }
 
-        // Close ledgers until temp entry is evicted
-        for (uint32_t i = test.getLCLSeq(); i < evictionLedger - 2; ++i)
-        {
-            closeLedgerOn(test.getApp(), i, 2, 1, 2016);
-        }
+    REQUIRE(test.getTTL(lk) == expectedLiveUntilLedger);
 
-        REQUIRE(test.getTTL(lk) == expectedLiveUntilLedger);
+    // This should be a no-op
+    test.invokeExtendOp({lk}, 10'000, 0);
+    REQUIRE(test.getTTL(lk) == expectedLiveUntilLedger);
 
-        // This should be a noop
-        test.invokeExtendOp({lk}, 10'000, 0);
-        REQUIRE(test.getTTL(lk) == expectedLiveUntilLedger);
+    // This will fail because the entry is expired
+    REQUIRE(client.extend("key", ContractDataDurability::TEMPORARY, 10'000,
+                          10'000) == INVOKE_HOST_FUNCTION_TRAPPED);
+    REQUIRE(test.getTTL(lk) == expectedLiveUntilLedger);
 
-        // This will fail because the entry is expired
-        REQUIRE(client.extend("key", ContractDataDurability::TEMPORARY, 10'000,
-                              10'000) == INVOKE_HOST_FUNCTION_TRAPPED);
-        REQUIRE(test.getTTL(lk) == expectedLiveUntilLedger);
+    REQUIRE(!test.isEntryLive(lk, test.getLCLSeq()));
 
-        REQUIRE(!test.isEntryLive(lk, test.getLCLSeq()));
+    SECTION("eviction")
+    {
+        // close one more ledger to trigger the eviction
+        closeLedgerOn(test.getApp(), evictionLedger, 2, 1, 2016);
 
-        SECTION("eviction")
         {
-            // close one more ledger to trigger the eviction
-            closeLedgerOn(test.getApp(), evictionLedger, 2, 1, 2016);
-
-            {
-                LedgerTxn ltx(test.getApp().getLedgerTxnRoot());
-                REQUIRE(!ltx.load(lk));
-            }
-
-            XDRInputFileStream in;
-            in.open(metaPath);
-            LedgerCloseMeta lcm;
-            bool evicted = false;
-            while (in.readOne(lcm))
-            {
-                REQUIRE(lcm.v() == 1);
-                if (lcm.v1().ledgerHeader.header.ledgerSeq == evictionLedger)
-                {
-                    REQUIRE(lcm.v1().evictedTemporaryLedgerKeys.size() == 2);
-                    auto sortedKeys = lcm.v1().evictedTemporaryLedgerKeys;
-                    std::sort(sortedKeys.begin(), sortedKeys.end());
-                    REQUIRE(sortedKeys[0] == lk);
-                    REQUIRE(sortedKeys[1] == getTTLKey(lk));
-                    evicted = true;
-                }
-                else
-                {
-                    REQUIRE(lcm.v1().evictedTemporaryLedgerKeys.empty());
-                }
-            }
-
-            REQUIRE(evicted);
+            LedgerTxn ltx(test.getApp().getLedgerTxnRoot());
+            REQUIRE(!ltx.load(lk));
         }
 
-        SECTION(
-            "Create temp entry with same key as an expired entry on eviction "
-            "ledger")
+        XDRInputFileStream in;
+        in.open(metaPath);
+        LedgerCloseMeta lcm;
+        bool evicted = false;
+        while (in.readOne(lcm))
         {
-            REQUIRE(client.put("key", ContractDataDurability::TEMPORARY, 234) ==
-                    INVOKE_HOST_FUNCTION_SUCCESS);
+            REQUIRE(lcm.v() == 1);
+            if (lcm.v1().ledgerHeader.header.ledgerSeq == evictionLedger)
             {
-                LedgerTxn ltx(test.getApp().getLedgerTxnRoot());
-                REQUIRE(ltx.load(lk));
+                REQUIRE(lcm.v1().evictedTemporaryLedgerKeys.size() == 2);
+                auto sortedKeys = lcm.v1().evictedTemporaryLedgerKeys;
+                std::sort(sortedKeys.begin(), sortedKeys.end());
+                REQUIRE(sortedKeys[0] == lk);
+                REQUIRE(sortedKeys[1] == getTTLKey(lk));
+                evicted = true;
             }
-
-            // Verify that we're on the ledger where the entry would get evicted
-            // it wasn't recreated.
-            REQUIRE(test.getLCLSeq() == evictionLedger);
-
-            // Entry is live again
-            REQUIRE(test.isEntryLive(lk, test.getLCLSeq()));
-
-            // Verify that we didn't emit an eviction
-            XDRInputFileStream in;
-            in.open(metaPath);
-            LedgerCloseMeta lcm;
-            while (in.readOne(lcm))
+            else
             {
                 REQUIRE(lcm.v1().evictedTemporaryLedgerKeys.empty());
             }
         }
-    };
 
-    SECTION("sql")
-    {
-        test(/*enableBucketListDB=*/false, /*backgroundEviction=*/false);
+        REQUIRE(evicted);
     }
 
-    SECTION("BucketListDB")
+    SECTION("Create temp entry with same key as an expired entry on eviction "
+            "ledger")
     {
-        SECTION("legacy main thread scan")
+        REQUIRE(client.put("key", ContractDataDurability::TEMPORARY, 234) ==
+                INVOKE_HOST_FUNCTION_SUCCESS);
         {
-            test(/*enableBucketListDB=*/true, /*backgroundEviction=*/false);
+            LedgerTxn ltx(test.getApp().getLedgerTxnRoot());
+            REQUIRE(ltx.load(lk));
         }
 
-        SECTION("background scan")
+        // Verify that we're on the ledger where the entry would get evicted
+        // if it hadn't been recreated.
+        REQUIRE(test.getLCLSeq() == evictionLedger);
+
+        // Entry is live again
+        REQUIRE(test.isEntryLive(lk, test.getLCLSeq()));
+
+        // Verify that we didn't emit an eviction
+        XDRInputFileStream in;
+        in.open(metaPath);
+        LedgerCloseMeta lcm;
+        while (in.readOne(lcm))
         {
-            test(/*enableBucketListDB=*/true, /*backgroundEviction=*/true);
+            REQUIRE(lcm.v1().evictedTemporaryLedgerKeys.empty());
         }
     }
 }
@@ -2839,7 +2803,7 @@ TEST_CASE("state archival operation errors", "[tx][soroban]")
 TEST_CASE("settings upgrade command line utils", "[tx][soroban][upgrades]")
 {
     VirtualClock clock;
-    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    auto cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     cfg.ENABLE_SOROBAN_DIAGNOSTIC_EVENTS = true;
     auto app = createTestApplication(clock, cfg);
     auto root = TestAccount::createRoot(*app);
diff --git a/src/transactions/test/LiquidityPoolDepositTests.cpp b/src/transactions/test/LiquidityPoolDepositTests.cpp
index 2bf6cd413a..1b8b899eaf 100644
--- a/src/transactions/test/LiquidityPoolDepositTests.cpp
+++ b/src/transactions/test/LiquidityPoolDepositTests.cpp
@@ -18,7 +18,7 @@ TEST_CASE_VERSIONS("liquidity pool deposit", "[tx][liquiditypool]")
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     // set up world
     auto const& lm = app->getLedgerManager();
diff --git a/src/transactions/test/LiquidityPoolTradeTests.cpp b/src/transactions/test/LiquidityPoolTradeTests.cpp
index 9cddacf59b..12b0ab3779 100644
--- a/src/transactions/test/LiquidityPoolTradeTests.cpp
+++ b/src/transactions/test/LiquidityPoolTradeTests.cpp
@@ -983,7 +983,7 @@ TEST_CASE_VERSIONS("liquidity pool trade", "[tx][liquiditypool]")
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     // set up world
     auto minBal = [&](int32_t n) {
diff --git a/src/transactions/test/LiquidityPoolWithdrawTests.cpp b/src/transactions/test/LiquidityPoolWithdrawTests.cpp
index a6cb9b6c77..df3acf8b3d 100644
--- a/src/transactions/test/LiquidityPoolWithdrawTests.cpp
+++ b/src/transactions/test/LiquidityPoolWithdrawTests.cpp
@@ -17,7 +17,7 @@ using namespace stellar::txtest;
 
 TEST_CASE_VERSIONS("liquidity pool withdraw", "[tx][liquiditypool]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
diff --git a/src/transactions/test/ManageBuyOfferTests.cpp b/src/transactions/test/ManageBuyOfferTests.cpp
index 377794b5b0..8eb416e7b9 100644
--- a/src/transactions/test/ManageBuyOfferTests.cpp
+++ b/src/transactions/test/ManageBuyOfferTests.cpp
@@ -47,7 +47,7 @@ TEST_CASE_VERSIONS("manage buy offer failure modes", "[tx][offers]")
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     int64_t const txfee = app->getLedgerManager().getLastTxFee();
     int64_t const minBalancePlusFees =
@@ -354,7 +354,7 @@ TEST_CASE_VERSIONS("manage buy offer liabilities", "[tx][offers]")
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     auto checkLiabilities = [&](std::string const& section, int64_t buyAmount,
                                 Price const& price, int64_t expectedBuying,
@@ -438,7 +438,7 @@ TEST_CASE_VERSIONS("manage buy offer exactly crosses existing offers",
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     int64_t const txfee = app->getLedgerManager().getLastTxFee();
     int64_t const minBalancePlusFees =
@@ -491,7 +491,7 @@ TEST_CASE_VERSIONS(
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     int64_t const txfee = app->getLedgerManager().getLastTxFee();
     int64_t const minBalancePlusFees =
@@ -619,7 +619,7 @@ TEST_CASE_VERSIONS(
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     int64_t const txfee = app->getLedgerManager().getLastTxFee();
     int64_t const minBalancePlusFees =
@@ -774,7 +774,7 @@ TEST_CASE_VERSIONS(
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     int64_t const txfee = app->getLedgerManager().getLastTxFee();
     int64_t const minBalancePlusFees =
@@ -927,7 +927,7 @@ TEST_CASE_VERSIONS("manage buy offer with zero liabilities", "[tx][offers]")
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     int64_t const txfee = app->getLedgerManager().getLastTxFee();
     int64_t const minBalancePlusFees =
@@ -983,7 +983,7 @@ TEST_CASE_VERSIONS("manage buy offer releases liabilities before modify",
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     int64_t const txfee = app->getLedgerManager().getLastTxFee();
     int64_t const minBalancePlusFees =
diff --git a/src/transactions/test/ManageDataTests.cpp b/src/transactions/test/ManageDataTests.cpp
index d1b5dbcfe4..770ba6f2e5 100644
--- a/src/transactions/test/ManageDataTests.cpp
+++ b/src/transactions/test/ManageDataTests.cpp
@@ -26,7 +26,7 @@ using namespace stellar::txtest;
 // add too much data
 TEST_CASE_VERSIONS("manage data", "[tx][managedata]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
diff --git a/src/transactions/test/MergeTests.cpp b/src/transactions/test/MergeTests.cpp
index d462768ac0..83595e7ff1 100644
--- a/src/transactions/test/MergeTests.cpp
+++ b/src/transactions/test/MergeTests.cpp
@@ -34,7 +34,7 @@ using namespace stellar::txtest;
 // Merge when you have outstanding data entries
 TEST_CASE_VERSIONS("merge", "[tx][merge]")
 {
-    Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+    Config cfg(getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
diff --git a/src/transactions/test/OfferTests.cpp b/src/transactions/test/OfferTests.cpp
index 184766e7b8..d85c7eb9a5 100644
--- a/src/transactions/test/OfferTests.cpp
+++ b/src/transactions/test/OfferTests.cpp
@@ -36,7 +36,7 @@ using namespace stellar::txtest;
 
 TEST_CASE_VERSIONS("create offer", "[tx][offers]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
diff --git a/src/transactions/test/PathPaymentStrictSendTests.cpp b/src/transactions/test/PathPaymentStrictSendTests.cpp
index 6eb6a153a5..21fb6c48f1 100644
--- a/src/transactions/test/PathPaymentStrictSendTests.cpp
+++ b/src/transactions/test/PathPaymentStrictSendTests.cpp
@@ -178,7 +178,7 @@ TEST_CASE_VERSIONS("pathpayment strict send", "[tx][pathpayment]")
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     auto exchanged = [&](TestMarketOffer const& o, int64_t sold,
                          int64_t bought) {
@@ -2406,7 +2406,7 @@ TEST_CASE_VERSIONS("pathpayment strict send", "[tx][pathpayment]")
 TEST_CASE_VERSIONS("pathpayment strict send uses all offers in a loop",
                    "[tx][pathpayment]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
 
diff --git a/src/transactions/test/PathPaymentTests.cpp b/src/transactions/test/PathPaymentTests.cpp
index 2b74d11974..20b5d048be 100644
--- a/src/transactions/test/PathPaymentTests.cpp
+++ b/src/transactions/test/PathPaymentTests.cpp
@@ -70,7 +70,7 @@ assetPathToString(const std::deque<Asset>& assets)
 
 TEST_CASE_VERSIONS("pathpayment", "[tx][pathpayment]")
 {
-    auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
diff --git a/src/transactions/test/PaymentTests.cpp b/src/transactions/test/PaymentTests.cpp
index e53faded26..d7bbf0807b 100644
--- a/src/transactions/test/PaymentTests.cpp
+++ b/src/transactions/test/PaymentTests.cpp
@@ -38,7 +38,7 @@ using namespace stellar::txtest;
 // path payment with a transfer rate
 TEST_CASE_VERSIONS("payment", "[tx][payment]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
 
@@ -1510,7 +1510,11 @@ TEST_CASE_VERSIONS("payment", "[tx][payment]")
 
             // Since a1 has a trustline, and there is only 1 trustline, we know
             // that gateway has no trustlines.
-            REQUIRE(app->getLedgerTxnRoot().countObjects(TRUSTLINE) == 1);
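+            // countObjects relied on SQL ledger state; with BucketListDB,
+            // verify directly that gateway has no trustline for idr.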
+            LedgerSnapshot lsg(*app);
+            LedgerKey trustKey(TRUSTLINE);
+            trustKey.trustLine().accountID = gateway.getPublicKey();
+            trustKey.trustLine().asset = assetToTrustLineAsset(idr);
+            REQUIRE(!lsg.load(trustKey));
         });
     }
     SECTION("authorize flag")
@@ -1930,7 +1934,7 @@ TEST_CASE_VERSIONS("payment fees", "[tx][payment]")
 
     SECTION("fee equal to base reserve")
     {
-        auto cfg = getTestConfig(1, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        auto cfg = getTestConfig(1, Config::TESTDB_IN_MEMORY);
         cfg.TESTING_UPGRADE_DESIRED_FEE = 100000000;
 
         VirtualClock clock;
@@ -2040,7 +2044,7 @@ TEST_CASE_VERSIONS("payment fees", "[tx][payment]")
 
     SECTION("fee bigger than base reserve")
     {
-        auto cfg = getTestConfig(1, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+        auto cfg = getTestConfig(1, Config::TESTDB_IN_MEMORY);
         cfg.TESTING_UPGRADE_DESIRED_FEE = 200000000;
 
         VirtualClock clock;
diff --git a/src/transactions/test/RevokeSponsorshipTests.cpp b/src/transactions/test/RevokeSponsorshipTests.cpp
index aa7c4db2fe..aee010fc1e 100644
--- a/src/transactions/test/RevokeSponsorshipTests.cpp
+++ b/src/transactions/test/RevokeSponsorshipTests.cpp
@@ -40,7 +40,7 @@ TEST_CASE_VERSIONS("update sponsorship", "[tx][sponsorship]")
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     auto minBal = [&](uint32_t n) {
         return app->getLedgerManager().getLastMinBalance(n);
diff --git a/src/transactions/test/SetOptionsTests.cpp b/src/transactions/test/SetOptionsTests.cpp
index 482b55a0e9..655259e190 100644
--- a/src/transactions/test/SetOptionsTests.cpp
+++ b/src/transactions/test/SetOptionsTests.cpp
@@ -36,7 +36,7 @@ using namespace stellar::txtest;
 // minbalance
 TEST_CASE_VERSIONS("set options", "[tx][setoptions]")
 {
-    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
diff --git a/src/transactions/test/SetTrustLineFlagsTests.cpp b/src/transactions/test/SetTrustLineFlagsTests.cpp
index c7de65867c..1946dcd5fb 100644
--- a/src/transactions/test/SetTrustLineFlagsTests.cpp
+++ b/src/transactions/test/SetTrustLineFlagsTests.cpp
@@ -105,7 +105,7 @@ getNumOffers(Application& app, TestAccount const& account, Asset const& asset)
 
 TEST_CASE_VERSIONS("set trustline flags", "[tx][settrustlineflags]")
 {
-    auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    auto const& cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
@@ -380,7 +380,7 @@ TEST_CASE_VERSIONS("revoke from pool",
 {
     VirtualClock clock;
     auto app = createTestApplication(
-        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS));
+        clock, getTestConfig(0, Config::TESTDB_IN_MEMORY));
 
     // set up world
     auto root = TestAccount::createRoot(*app);
diff --git a/src/transactions/test/TxEnvelopeTests.cpp b/src/transactions/test/TxEnvelopeTests.cpp
index 99dc560533..c8ed667e6b 100644
--- a/src/transactions/test/TxEnvelopeTests.cpp
+++ b/src/transactions/test/TxEnvelopeTests.cpp
@@ -86,7 +86,7 @@ TEST_CASE("txset - correct apply order", "[tx][envelope]")
 
 TEST_CASE_VERSIONS("txenvelope", "[tx][envelope]")
 {
-    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY_NO_OFFERS);
+    Config cfg = getTestConfig(0, Config::TESTDB_IN_MEMORY);
 
     VirtualClock clock;
     auto app = createTestApplication(clock, cfg);
diff --git a/src/util/ProtocolVersion.h b/src/util/ProtocolVersion.h
index b908b8f4a9..32341840c9 100644
--- a/src/util/ProtocolVersion.h
+++ b/src/util/ProtocolVersion.h
@@ -34,7 +34,8 @@ enum class ProtocolVersion : uint32_t
     V_19,
     V_20,
     V_21,
-    V_22
+    V_22,
+    V_23
 };
 
 // Checks whether provided protocolVersion is before (i.e. strictly lower than)
diff --git a/src/util/test/XDRStreamTests.cpp b/src/util/test/XDRStreamTests.cpp
index 7710562c91..16754b5a1b 100644
--- a/src/util/test/XDRStreamTests.cpp
+++ b/src/util/test/XDRStreamTests.cpp
@@ -33,7 +33,7 @@ TEST_CASE("XDROutputFileStream fail modes", "[xdrstream]")
         size_t bytes = 0;
         auto ledgerEntries = LedgerTestUtils::generateValidLedgerEntries(1);
         auto bucketEntries =
-            Bucket::convertToBucketEntry(false, {}, ledgerEntries, {});
+            LiveBucket::convertToBucketEntry(false, {}, ledgerEntries, {});
 
         REQUIRE_THROWS_AS(out.writeOne(bucketEntries[0], &hasher, &bytes),
                           std::runtime_error);
@@ -53,7 +53,7 @@ TEST_CASE("XDROutputFileStream fsync bench", "[!hide][xdrstream][bench]")
     SHA256 hasher;
     auto ledgerEntries = LedgerTestUtils::generateValidLedgerEntries(10000000);
     auto bucketEntries =
-        Bucket::convertToBucketEntry(false, {}, ledgerEntries, {});
+        LiveBucket::convertToBucketEntry(false, {}, ledgerEntries, {});
 
     fs::mkpath(cfg.BUCKET_DIR_PATH);
 
diff --git a/src/util/types.h b/src/util/types.h
index 0a893c5a1e..5dd02640e8 100644
--- a/src/util/types.h
+++ b/src/util/types.h
@@ -128,6 +128,22 @@ assetToString(const Asset& asset)
     return r;
 };
 
+inline LedgerKey
+getBucketLedgerKey(HotArchiveBucketEntry const& be)
+{
+    switch (be.type())
+    {
+    case HOT_ARCHIVE_LIVE:
+    case HOT_ARCHIVE_DELETED:
+        return be.key();
+    case HOT_ARCHIVE_ARCHIVED:
+        return LedgerEntryKey(be.archivedEntry());
+    case HOT_ARCHIVE_METAENTRY:
+    default:
+        throw std::invalid_argument("Tried to get key for METAENTRY");
+    }
+}
+
 inline LedgerKey
 getBucketLedgerKey(BucketEntry const& be)
 {
@@ -144,6 +160,14 @@ getBucketLedgerKey(BucketEntry const& be)
     }
 }
 
+// TODO: Implement. Placeholder: returns a default-constructed (empty) key.
+inline LedgerKey
+getBucketLedgerKey(ColdArchiveBucketEntry const& be)
+{
+    LedgerKey k;
+    return k;
+}
+
 // Round value v down to largest multiple of m, m must be power of 2
 template <typename T>
 inline T