Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@
import java.util.concurrent.atomic.AtomicInteger;
import java.util.function.IntSupplier;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.conf.ConfigurationManager;
import org.apache.hadoop.hbase.conf.PropagatingConfigurationObserver;
Expand Down Expand Up @@ -344,6 +345,11 @@ protected void requestCompactionInternal(HRegion region, HStore store, String wh
return;
}

if (isReadOnlyEnabled()) {
LOG.info("Ignoring compaction request for " + region + ",because read-only mode is on.");
return;
}

Comment on lines +348 to +352
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Why don't we simply disable compaction altogether in the read replica cluster? See line #343 in CompactSplit; there's already a check for the compaction-enabled flag. I would rather refrain from polluting the CompactSplit code with logic for read replicas.

Copy link
Author

@sharmaar12 sharmaar12 Oct 29, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We could use that approach, but one issue I can think of is that the hbase.global.readonly.enabled property is dynamically configurable via update_all_config — is the same true for hbase.hstore.compaction.enabled?

Copy link
Contributor

@anmolnar anmolnar Oct 29, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I like @wchevreuil 's idea.
How about adding the read-only check to the getter?

  public boolean isCompactionsEnabled() {
    return compactionsEnabled && !isReadOnlyEnabled();
  }

You don't need to dynamically change the compaction flag.
wdyt?

Copy link
Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Then we would need to at least modify the log messages to mention that either compaction is disabled or read-only mode is on. Otherwise, compaction may actually be enabled, but we would be logging it as disabled when the real cause is read-only mode.

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

LOG.info("Ignoring compaction request for " + region + 
  (!isReadOnlyEnabled ? ", because compaction is disabled." : " in read-only mode"));

Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

or just leave it as is, not a biggy

if (
this.server.isStopped() || (region.getTableDescriptor() != null
&& !region.getTableDescriptor().isCompactionEnabled())
Expand Down Expand Up @@ -442,6 +448,13 @@ private Optional<CompactionContext> selectCompaction(HRegion region, HStore stor
LOG.info(String.format("User has disabled compactions"));
return Optional.empty();
}

// Should not allow compaction if cluster is in read-only mode
if (isReadOnlyEnabled()) {
LOG.info(String.format("Compaction request skipped as read-only mode is on"));
return Optional.empty();
}

Optional<CompactionContext> compaction = store.requestCompaction(priority, tracker, user);
if (!compaction.isPresent() && region.getRegionInfo() != null) {
String reason = "Not compacting " + region.getRegionInfo().getRegionNameAsString()
Expand Down Expand Up @@ -856,6 +869,11 @@ public boolean isCompactionsEnabled() {
return compactionsEnabled;
}

/**
 * Returns whether the cluster-wide read-only mode is currently switched on.
 * Reads the live {@link org.apache.hadoop.conf.Configuration} on every call, so a
 * dynamically updated {@code hbase.global.readonly.enabled} value takes effect
 * without caching — NOTE(review): assumes {@code conf} reflects online config
 * updates; confirm against the ConfigurationManager wiring.
 */
private boolean isReadOnlyEnabled() {
  final boolean readOnly = conf.getBoolean(HConstants.HBASE_GLOBAL_READONLY_ENABLED_KEY,
    HConstants.HBASE_GLOBAL_READONLY_ENABLED_DEFAULT);
  return readOnly;
}

public void setCompactionsEnabled(boolean compactionsEnabled) {
this.compactionsEnabled = compactionsEnabled;
this.conf.setBoolean(HBASE_REGION_SERVER_ENABLE_COMPACTION, compactionsEnabled);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -143,7 +143,7 @@ private HFileContext createFileContext(Compression.Algorithm compression,
public final StoreFileWriter createWriter(CreateStoreFileWriterParams params) throws IOException {
if (!isPrimaryReplica || isReadOnlyEnabled()) {
throw new IllegalStateException(
"Should not call create writer on secondary replicas or in read only mode");
"Should not call create writer on secondary replicas or in read-only mode");
}
// creating new cache config for each new writer
final CacheConfig cacheConf = ctx.getCacheConf();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,9 @@
import org.apache.hadoop.hbase.filter.Filter;
import org.apache.hadoop.hbase.regionserver.FlushLifeCycleTracker;
import org.apache.hadoop.hbase.regionserver.MiniBatchOperationInProgress;
import org.apache.hadoop.hbase.regionserver.Store;
import org.apache.hadoop.hbase.regionserver.StoreFile;
import org.apache.hadoop.hbase.regionserver.compactions.CompactionLifeCycleTracker;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.wal.WALEdit;
import org.apache.yetus.audience.InterfaceAudience;
Expand Down Expand Up @@ -81,6 +84,7 @@ private void internalReadOnlyGuard() throws IOException {

@Override
public void start(CoprocessorEnvironment env) throws IOException {

this.globalReadOnlyEnabled =
env.getConfiguration().getBoolean(HConstants.HBASE_GLOBAL_READONLY_ENABLED_KEY,
HConstants.HBASE_GLOBAL_READONLY_ENABLED_DEFAULT);
Expand Down Expand Up @@ -131,6 +135,13 @@ public void preFlush(final ObserverContext<? extends RegionCoprocessorEnvironmen
internalReadOnlyGuard();
}

/**
 * Coprocessor hook invoked before compaction candidate files are selected.
 * Delegates to {@code internalReadOnlyGuard()}, which (per its signature) may throw
 * an {@link java.io.IOException} — presumably when global read-only mode is enabled,
 * blocking compactions on this region; guard body not shown here, verify.
 */
@Override
public void preCompactSelection(ObserverContext<? extends RegionCoprocessorEnvironment> ctx,
  Store targetStore, List<? extends StoreFile> selectedFiles, CompactionLifeCycleTracker lifeCycleTracker)
  throws IOException {
  internalReadOnlyGuard();
}

@Override
public boolean preCheckAndPut(ObserverContext<? extends RegionCoprocessorEnvironment> c,
byte[] row, byte[] family, byte[] qualifier, CompareOperator op, ByteArrayComparable comparator,
Expand Down