Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
16 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -2074,7 +2074,7 @@ public BesuControllerBuilder setupControllerBuilder() {
.apiConfiguration(apiConfiguration)
.balConfiguration(balConfiguration)
.besuComponent(besuComponent);
if (DataStorageFormat.BONSAI.equals(getDataStorageConfiguration().getDataStorageFormat())) {
if (getDataStorageConfiguration().getDataStorageFormat().isBonsaiFormat()) {
final PathBasedExtraStorageConfiguration subStorageConfiguration =
getDataStorageConfiguration().getPathBasedExtraStorageConfiguration();
besuControllerBuilder.isParallelTxProcessingEnabled(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -937,6 +937,11 @@ public BesuController build() {
},
0));
} else {
// Already in ARCHIVE mode (restart after migration): register ongoing migration
final BonsaiFlatDbToArchiveMigrator archiveMigrator =
createArchiveMigrator(worldStateStorageCoordinator, worldStateArchive, blockchain);
archiveMigrator.startOngoingMigration();
closeables.add(archiveMigrator);
blockchain.observeBlockAdded(archiver);
}
}
Expand Down Expand Up @@ -1065,7 +1070,8 @@ private BonsaiFlatDbToArchiveMigrator createArchiveMigrator(
blockchain,
migrationExecutor,
metricsSystem,
archiveStrategy);
archiveStrategy,
dataStorageConfiguration.getPathBasedExtraStorageConfiguration().getMaxLayersToLoad());
}

/**
Expand Down Expand Up @@ -1367,12 +1373,13 @@ yield new BonsaiWorldStateProvider(
yield new BonsaiArchiveWorldStateProvider(
worldStateKeyValueStorage,
blockchain,
dataStorageConfiguration.getPathBasedExtraStorageConfiguration(),
dataStorageConfiguration,
bonsaiCachedMerkleTrieLoader,
besuComponent.map(BesuComponent::getBesuPluginContext).orElse(null),
evmConfiguration,
worldStateHealerSupplier,
codeCache);
codeCache,
metricsSystem);
}
case FOREST -> {
final WorldStatePreimageStorage preimageStorage =
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -44,24 +44,28 @@ public enum KeyValueSegmentIdentifier implements SegmentIdentifier {
EnumSet.of(X_BONSAI_ARCHIVE),
true,
false,
true,
true),
ACCOUNT_STORAGE_ARCHIVE(
"ACCOUNT_STORAGE_ARCHIVE".getBytes(StandardCharsets.UTF_8),
EnumSet.of(X_BONSAI_ARCHIVE),
true,
false,
true,
true),
ACCOUNT_INFO_STATE_FREEZER(
"ACCOUNT_INFO_STATE_FREEZER".getBytes(StandardCharsets.UTF_8),
EnumSet.of(X_BONSAI_ARCHIVE),
true,
false,
true,
true),
ACCOUNT_STORAGE_FREEZER(
"ACCOUNT_STORAGE_FREEZER".getBytes(StandardCharsets.UTF_8),
EnumSet.of(X_BONSAI_ARCHIVE),
true,
false,
true,
true),
VARIABLES(new byte[] {11}), // formerly GOQUORUM_PRIVATE_WORLD_STATE

Expand All @@ -81,6 +85,7 @@ public enum KeyValueSegmentIdentifier implements SegmentIdentifier {
private final boolean containsStaticData;
private final boolean eligibleToHighSpecFlag;
private final boolean staticDataGarbageCollectionEnabled;
private final boolean cacheIndexAndFilterBlocks;

KeyValueSegmentIdentifier(final byte[] id) {
this(id, EnumSet.allOf(DataStorageFormat.class));
Expand All @@ -96,11 +101,28 @@ public enum KeyValueSegmentIdentifier implements SegmentIdentifier {
final boolean containsStaticData,
final boolean eligibleToHighSpecFlag,
final boolean staticDataGarbageCollectionEnabled) {
this(
id,
formats,
containsStaticData,
eligibleToHighSpecFlag,
staticDataGarbageCollectionEnabled,
false);
}

  /**
   * Full constructor for a segment identifier.
   *
   * @param id raw byte identifier used as the segment's key in the underlying store
   * @param formats the data storage formats in which this segment is included
   * @param containsStaticData whether this segment holds static data
   * @param eligibleToHighSpecFlag whether this segment may be tuned by the high-spec flag
   * @param staticDataGarbageCollectionEnabled whether garbage collection of static data is
   *     enabled for this segment
   * @param cacheIndexAndFilterBlocks whether index and filter blocks should be cached for this
   *     segment (storage-backend tuning, exposed via isCacheIndexAndFilterBlocks)
   */
  KeyValueSegmentIdentifier(
      final byte[] id,
      final EnumSet<DataStorageFormat> formats,
      final boolean containsStaticData,
      final boolean eligibleToHighSpecFlag,
      final boolean staticDataGarbageCollectionEnabled,
      final boolean cacheIndexAndFilterBlocks) {
    this.id = id;
    this.formats = formats;
    this.containsStaticData = containsStaticData;
    this.eligibleToHighSpecFlag = eligibleToHighSpecFlag;
    this.staticDataGarbageCollectionEnabled = staticDataGarbageCollectionEnabled;
    this.cacheIndexAndFilterBlocks = cacheIndexAndFilterBlocks;
  }

@Override
Expand Down Expand Up @@ -128,6 +150,11 @@ public boolean isStaticDataGarbageCollectionEnabled() {
return staticDataGarbageCollectionEnabled;
}

  /** Returns whether index and filter blocks should be cached for this segment. */
  @Override
  public boolean isCacheIndexAndFilterBlocks() {
    return cacheIndexAndFilterBlocks;
  }

@Override
public boolean includeInDatabaseFormat(final DataStorageFormat format) {
return formats.contains(format);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -16,18 +16,21 @@

import org.hyperledger.besu.datatypes.Hash;
import org.hyperledger.besu.ethereum.chain.Blockchain;
import org.hyperledger.besu.ethereum.core.BlockHeader;
import org.hyperledger.besu.ethereum.core.MutableWorldState;
import org.hyperledger.besu.ethereum.trie.MerkleTrieException;
import org.hyperledger.besu.ethereum.trie.pathbased.bonsai.cache.BonsaiCachedMerkleTrieLoader;
import org.hyperledger.besu.ethereum.trie.pathbased.bonsai.cache.CodeCache;
import org.hyperledger.besu.ethereum.trie.pathbased.bonsai.storage.BonsaiWorldStateKeyValueStorage;
import org.hyperledger.besu.ethereum.trie.pathbased.bonsai.storage.flat.BonsaiArchiveReadFlatDbStrategyProvider;
import org.hyperledger.besu.ethereum.trie.pathbased.bonsai.worldview.BonsaiWorldState;
import org.hyperledger.besu.ethereum.trie.pathbased.common.provider.WorldStateQueryParams;
import org.hyperledger.besu.ethereum.trie.pathbased.common.worldview.PathBasedWorldState;
import org.hyperledger.besu.ethereum.trie.pathbased.common.worldview.WorldStateConfig;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.FlatDbMode;
import org.hyperledger.besu.ethereum.worldstate.PathBasedExtraStorageConfiguration;
import org.hyperledger.besu.evm.internal.EvmConfiguration;
import org.hyperledger.besu.plugin.ServiceManager;
import org.hyperledger.besu.plugin.services.MetricsSystem;

import java.util.Optional;
import java.util.function.Supplier;
Expand All @@ -39,76 +42,86 @@ public class BonsaiArchiveWorldStateProvider extends BonsaiWorldStateProvider {

private static final Logger LOG = LoggerFactory.getLogger(BonsaiArchiveWorldStateProvider.class);

private final BonsaiWorldStateKeyValueStorage archiveReadStorage;
private final CodeCache codeCache;
private final WorldStateConfig archiveWorldStateConfig;

public BonsaiArchiveWorldStateProvider(
final BonsaiWorldStateKeyValueStorage worldStateKeyValueStorage,
final Blockchain blockchain,
final PathBasedExtraStorageConfiguration pathBasedExtraStorageConfiguration,
final DataStorageConfiguration dataStorageConfiguration,
final BonsaiCachedMerkleTrieLoader bonsaiCachedMerkleTrieLoader,
final ServiceManager pluginContext,
final EvmConfiguration evmConfiguration,
final Supplier<WorldStateHealer> worldStateHealerSupplier,
final CodeCache codeCache) {
final CodeCache codeCache,
final MetricsSystem metricsSystem) {
super(
worldStateKeyValueStorage,
blockchain,
pathBasedExtraStorageConfiguration,
dataStorageConfiguration.getPathBasedExtraStorageConfiguration(),
bonsaiCachedMerkleTrieLoader,
pluginContext,
evmConfiguration,
worldStateHealerSupplier,
codeCache);
this.codeCache = codeCache;
this.archiveWorldStateConfig =
WorldStateConfig.newBuilder(worldStateConfig).trieDisabled(true).build();
final BonsaiArchiveReadFlatDbStrategyProvider archiveProvider =
new BonsaiArchiveReadFlatDbStrategyProvider(metricsSystem, dataStorageConfiguration);
archiveProvider.loadFlatDbStrategy(worldStateKeyValueStorage.getComposedWorldStateStorage());
this.archiveReadStorage =
new BonsaiWorldStateKeyValueStorage(
archiveProvider,
worldStateKeyValueStorage.getComposedWorldStateStorage(),
worldStateKeyValueStorage.getTrieLogStorage());
}

@Override
public Optional<MutableWorldState> getWorldState(final WorldStateQueryParams queryParams) {
// If not in archive mode then the migration is not yet complete, so fallback to
// the regular BonsaiWorldStateProvider
if (!worldStateKeyValueStorage.getFlatDbMode().equals(FlatDbMode.ARCHIVE)) {
return super.getWorldState(queryParams);
if (isHistoricalQuery(queryParams)) {
LOG.debug(
"Returning archive state without verifying state root for block {}",
queryParams.getBlockHeader().getNumber());
final BonsaiWorldState archiveWorldState =
new BonsaiWorldState(
this, archiveReadStorage, evmConfiguration, archiveWorldStateConfig, codeCache);
// Freeze before persisting to ensure that the historical block number, which is needed for
// Bonsai archive, does not affect the database
archiveWorldState.freezeStorage();
return rollMutableArchiveStateToBlockHash(
archiveWorldState, queryParams.getBlockHeader().getBlockHash());
}
return super.getWorldState(queryParams);
}

if (queryParams.shouldWorldStateUpdateHead()) {
return getFullWorldState(queryParams);
} else {
// If we are creating a world state for a historic/archive block, we have 2 options:
// 1. Roll back and create a layered world state. We can do this as far back as 512 blocks by
// default, and we end up with a full state trie & flat DB at the desired block
// 2. Rely entirely on the flat DB, which is less safe because we can't check the world state
// root is correct but at least gives us the ability to serve historic state. The rollback
// step in this case is minimal - take the chain head state and reset the block hash and
// number for
// archive flat DB queries
final BlockHeader chainHeadBlockHeader = blockchain.getChainHeadHeader();
if (chainHeadBlockHeader.getNumber() - queryParams.getBlockHeader().getNumber()
>= trieLogManager.getMaxLayersToLoad()) {
LOG.debug(
"Returning archive state without verifying state root {}",
trieLogManager.getMaxLayersToLoad());
return cachedWorldStorageManager
.getWorldState(chainHeadBlockHeader.getHash())
.map(MutableWorldState::disableTrie)
.flatMap(
worldState ->
rollMutableArchiveStateToBlockHash( // This is a tiny action for archive
// state
(PathBasedWorldState) worldState,
queryParams.getBlockHeader().getBlockHash()))
.map(MutableWorldState::freezeStorage);
}
return super.getWorldState(queryParams);
@Override
public void close() {
super.close();
try {
archiveReadStorage.close();
} catch (Exception e) {
// no-op, consistent with parent
}
}

private boolean isHistoricalQuery(final WorldStateQueryParams queryParams) {
return worldStateKeyValueStorage.getFlatDbMode().equals(FlatDbMode.ARCHIVE)
&& !queryParams.shouldWorldStateUpdateHead()
&& blockchain.getChainHeadHeader().getNumber() - queryParams.getBlockHeader().getNumber()
>= trieLogManager.getMaxLayersToLoad();
}

// Archive-specific rollback behaviour. There is no trie-log roll forward/backward, we just roll
// back the state root, block hash and block number
protected Optional<MutableWorldState> rollMutableArchiveStateToBlockHash(
final PathBasedWorldState mutableState, final Hash blockHash) {
LOG.trace(
"Rolling mutable archive world state to block hash " + blockHash.getBytes().toHexString());
"Rolling mutable archive world state to block hash {}", blockHash.getBytes().toHexString());
try {
// Simply persist the block hash/number and state root for this archive state
mutableState.persist(blockchain.getBlockHeader(blockHash).get());

LOG.trace(
"Archive rolling finished, {} now at {}",
mutableState.getWorldStateStorage().getClass().getSimpleName(),
Expand All @@ -124,7 +137,6 @@ protected Optional<MutableWorldState> rollMutableArchiveStateToBlockHash(
.addArgument(blockHash)
.addArgument(e)
.log();

return Optional.empty();
}
}
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,43 @@
/*
* Copyright contributors to Hyperledger Besu.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
*/
package org.hyperledger.besu.ethereum.trie.pathbased.bonsai.storage.flat;

import org.hyperledger.besu.ethereum.trie.pathbased.common.storage.flat.CodeStorageStrategy;
import org.hyperledger.besu.ethereum.trie.pathbased.common.storage.flat.FlatDbStrategy;
import org.hyperledger.besu.ethereum.worldstate.DataStorageConfiguration;
import org.hyperledger.besu.ethereum.worldstate.FlatDbMode;
import org.hyperledger.besu.plugin.services.MetricsSystem;

/**
* A {@link BonsaiFlatDbStrategyProvider} that always returns {@link BonsaiArchiveFlatDbStrategy},
* regardless of the {@link FlatDbMode} stored in the database. Used to create a read-only archive
* storage view that shares the same underlying RocksDB segments as the main storage but routes all
* flat-DB reads through the seekForPrev archive path.
*/
public class BonsaiArchiveReadFlatDbStrategyProvider extends BonsaiFlatDbStrategyProvider {

  /**
   * @param metricsSystem metrics system passed through to the created flat-DB strategy
   * @param dataStorageConfiguration storage configuration forwarded to the base provider
   */
  public BonsaiArchiveReadFlatDbStrategyProvider(
      final MetricsSystem metricsSystem, final DataStorageConfiguration dataStorageConfiguration) {
    super(metricsSystem, dataStorageConfiguration);
  }

  // Ignores the supplied flatDbMode on purpose: every read through this provider takes the
  // archive (seekForPrev) path, regardless of the mode persisted in the database.
  @Override
  protected FlatDbStrategy createFlatDbStrategy(
      final FlatDbMode flatDbMode,
      final MetricsSystem metricsSystem,
      final CodeStorageStrategy codeStorageStrategy) {
    return new BonsaiArchiveFlatDbStrategy(metricsSystem, codeStorageStrategy);
  }
}
Original file line number Diff line number Diff line change
Expand Up @@ -89,10 +89,8 @@ protected FlatDbStrategy createFlatDbStrategy(
final FlatDbMode flatDbMode,
final MetricsSystem metricsSystem,
final CodeStorageStrategy codeStorageStrategy) {
if (flatDbMode == FlatDbMode.FULL) {
if (flatDbMode == FlatDbMode.FULL || flatDbMode == FlatDbMode.ARCHIVE) {
return new BonsaiFullFlatDbStrategy(metricsSystem, codeStorageStrategy);
} else if (flatDbMode == FlatDbMode.ARCHIVE) {
return new BonsaiArchiveFlatDbStrategy(metricsSystem, codeStorageStrategy);
} else {
return new BonsaiPartialFlatDbStrategy(metricsSystem, codeStorageStrategy);
}
Expand Down
Loading
Loading