@@ -96,8 +96,11 @@ private void registerBulkLoad(ObserverContext<? extends RegionCoprocessorEnviron
try (Connection connection = ConnectionFactory.createConnection(cfg);
BackupSystemTable tbl = new BackupSystemTable(connection)) {
Set<TableName> fullyBackedUpTables = tbl.getTablesIncludedInBackups();
Map<TableName, Long> continuousBackupTableSet = tbl.getContinuousBackupTableSet();

if (fullyBackedUpTables.contains(tableName)) {
if (
fullyBackedUpTables.contains(tableName) && !continuousBackupTableSet.containsKey(tableName)
Contributor:
[nit] Do you see a lot of entries before this change that keep registering the same table? If so, and if this isn't only from unit tests, do you think it's a logic error from that trigger?

Contributor:
I have a suggestion. Perhaps we could add a comment stating that for continuous backup, this isn't necessary, as everything will be utilized from the WAL backup location.

Author @ankitsol (Oct 22, 2025):
Sorry, I didn't understand the question completely. This BackupObserver#registerBulkLoad() is called for each bulkload operation and registers them in the backup system table.

Contributor:
Adding the comment should have addressed my concerns, and yeah, !continuousBackupTableSet.containsKey(tableName) means only non-continuous backups need this bulkload registration.
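For reference, a minimal sketch of how that clarifying comment could sit in BackupObserver#registerBulkLoad(); the condition comes from this diff, the comment wording is only an assumption:

if (
  fullyBackedUpTables.contains(tableName) && !continuousBackupTableSet.containsKey(tableName)
) {
  // Tables under continuous backup are skipped here: their bulk-loaded files are already
  // captured via the WAL backup location, so no bulk-load registration is needed for them.
  tbl.registerBulkLoad(tableName, region.getEncodedNameAsBytes(), cfToHFilePaths);
}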

) {
tbl.registerBulkLoad(tableName, region.getEncodedNameAsBytes(), cfToHFilePaths);
} else {
if (LOG.isTraceEnabled()) {
@@ -20,7 +20,6 @@
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.CONF_CONTINUOUS_BACKUP_WAL_DIR;
import static org.apache.hadoop.hbase.backup.BackupRestoreConstants.JOB_NAME_CONF_KEY;
import static org.apache.hadoop.hbase.backup.replication.ContinuousBackupReplicationEndpoint.ONE_DAY_IN_MILLISECONDS;
import static org.apache.hadoop.hbase.backup.util.BackupFileSystemManager.BULKLOAD_FILES_DIR;
import static org.apache.hadoop.hbase.backup.util.BackupFileSystemManager.WALS_DIR;
import static org.apache.hadoop.hbase.backup.util.BackupUtils.DATE_FORMAT;

@@ -37,6 +36,7 @@
import java.util.Map;
import java.util.Set;
import java.util.TimeZone;
import java.util.stream.Collectors;
import org.apache.commons.io.FilenameUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.fs.FileStatus;
@@ -55,6 +55,7 @@
import org.apache.hadoop.hbase.backup.mapreduce.MapReduceBackupCopyJob;
import org.apache.hadoop.hbase.backup.mapreduce.MapReduceHFileSplitterJob;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.backup.util.BulkFilesCollector;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Connection;
@@ -86,6 +87,7 @@
@InterfaceAudience.Private
public class IncrementalTableBackupClient extends TableBackupClient {
private static final Logger LOG = LoggerFactory.getLogger(IncrementalTableBackupClient.class);
private static final String BULKLOAD_COLLECTOR_OUTPUT = "bulkload-collector-output";

protected IncrementalTableBackupClient() {
}
@@ -137,89 +139,88 @@ protected static int getIndex(TableName tbl, List<TableName> sTableList) {
* the backup is marked as complete.
* @param tablesToBackup list of tables to be backed up
*/
protected List<BulkLoad> handleBulkLoad(List<TableName> tablesToBackup) throws IOException {
protected List<BulkLoad> handleBulkLoad(List<TableName> tablesToBackup,
Map<TableName, List<String>> tablesToWALFileList, Map<TableName, Long> tablesToPrevBackupTs)
throws IOException {
Map<TableName, MergeSplitBulkloadInfo> toBulkload = new HashMap<>();
List<BulkLoad> bulkLoads;
if (backupInfo.isContinuousBackupEnabled()) {
bulkLoads =
backupManager.readBulkloadRows(tablesToBackup, backupInfo.getIncrCommittedWalTs());
} else {
bulkLoads = backupManager.readBulkloadRows(tablesToBackup);
}
List<BulkLoad> bulkLoads = new ArrayList<>();

FileSystem tgtFs;
try {
tgtFs = FileSystem.get(new URI(backupInfo.getBackupRootDir()), conf);
} catch (URISyntaxException use) {
throw new IOException("Unable to get FileSystem", use);
}

Path rootdir = CommonFSUtils.getRootDir(conf);
Path tgtRoot = new Path(new Path(backupInfo.getBackupRootDir()), backupId);

for (BulkLoad bulkLoad : bulkLoads) {
TableName srcTable = bulkLoad.getTableName();
MergeSplitBulkloadInfo bulkloadInfo =
toBulkload.computeIfAbsent(srcTable, MergeSplitBulkloadInfo::new);
String regionName = bulkLoad.getRegion();
String fam = bulkLoad.getColumnFamily();
String filename = FilenameUtils.getName(bulkLoad.getHfilePath());
if (!backupInfo.isContinuousBackupEnabled()) {
bulkLoads = backupManager.readBulkloadRows(tablesToBackup);
for (BulkLoad bulkLoad : bulkLoads) {
TableName srcTable = bulkLoad.getTableName();
MergeSplitBulkloadInfo bulkloadInfo =
toBulkload.computeIfAbsent(srcTable, MergeSplitBulkloadInfo::new);
String regionName = bulkLoad.getRegion();
String fam = bulkLoad.getColumnFamily();
String filename = FilenameUtils.getName(bulkLoad.getHfilePath());

if (!tablesToBackup.contains(srcTable)) {
LOG.debug("Skipping {} since it is not in tablesToBackup", srcTable);
continue;
}
Contributor:
nit: Should this be moved to line 162? It looks like some variables are being set, but they could end up unused because of this if block.

Path tblDir = CommonFSUtils.getTableDir(rootdir, srcTable);
Path p = new Path(tblDir, regionName + Path.SEPARATOR + fam + Path.SEPARATOR + filename);

String srcTableQualifier = srcTable.getQualifierAsString();
String srcTableNs = srcTable.getNamespaceAsString();
Path tgtFam = new Path(tgtRoot, srcTableNs + Path.SEPARATOR + srcTableQualifier
+ Path.SEPARATOR + regionName + Path.SEPARATOR + fam);
if (!tgtFs.mkdirs(tgtFam)) {
throw new IOException("couldn't create " + tgtFam);
}
Path tgt = new Path(tgtFam, filename);

Path archiveDir = HFileArchiveUtil.getStoreArchivePath(conf, srcTable, regionName, fam);
Path archive = new Path(archiveDir, filename);

if (!tablesToBackup.contains(srcTable)) {
LOG.debug("Skipping {} since it is not in tablesToBackup", srcTable);
continue;
}
Path tblDir = CommonFSUtils.getTableDir(rootdir, srcTable);
Path p = new Path(tblDir, regionName + Path.SEPARATOR + fam + Path.SEPARATOR + filename);

// For continuous backup: bulkload files are copied from backup directory defined by
// CONF_CONTINUOUS_BACKUP_WAL_DIR instead of source cluster.
String backupRootDir = conf.get(CONF_CONTINUOUS_BACKUP_WAL_DIR);
if (backupInfo.isContinuousBackupEnabled() && !Strings.isNullOrEmpty(backupRootDir)) {
String dayDirectoryName = BackupUtils.formatToDateString(bulkLoad.getTimestamp());
Path bulkLoadBackupPath =
new Path(backupRootDir, BULKLOAD_FILES_DIR + Path.SEPARATOR + dayDirectoryName);
Path bulkLoadDir = new Path(bulkLoadBackupPath,
srcTable.getNamespaceAsString() + Path.SEPARATOR + srcTable.getNameAsString());
FileSystem backupFs = FileSystem.get(bulkLoadDir.toUri(), conf);
Path fullBulkLoadBackupPath =
new Path(bulkLoadDir, regionName + Path.SEPARATOR + fam + Path.SEPARATOR + filename);
if (backupFs.exists(fullBulkLoadBackupPath)) {
LOG.debug("Backup bulkload file found {}", fullBulkLoadBackupPath);
p = fullBulkLoadBackupPath;
} else {
LOG.warn("Backup bulkload file not found {}", fullBulkLoadBackupPath);
if (fs.exists(p)) {
if (LOG.isTraceEnabled()) {
LOG.trace("found bulk hfile {} in {} for {}", bulkLoad.getHfilePath(), p.getParent(),
srcTableQualifier);
LOG.trace("copying {} to {}", p, tgt);
}
bulkloadInfo.addActiveFile(p.toString());
} else if (fs.exists(archive)) {
LOG.debug("copying archive {} to {}", archive, tgt);
bulkloadInfo.addArchiveFiles(archive.toString());
}
}

String srcTableQualifier = srcTable.getQualifierAsString();
String srcTableNs = srcTable.getNamespaceAsString();
Path tgtFam = new Path(tgtRoot, srcTableNs + Path.SEPARATOR + srcTableQualifier
+ Path.SEPARATOR + regionName + Path.SEPARATOR + fam);
if (!tgtFs.mkdirs(tgtFam)) {
throw new IOException("couldn't create " + tgtFam);
for (MergeSplitBulkloadInfo bulkloadInfo : toBulkload.values()) {
mergeSplitAndCopyBulkloadedHFiles(bulkloadInfo.getActiveFiles(),
bulkloadInfo.getArchiveFiles(), bulkloadInfo.getSrcTable(), tgtFs);
}
Path tgt = new Path(tgtFam, filename);
} else {
// Continuous incremental backup: run BulkLoadCollectorJob over backed-up WALs
Path collectorOutput = new Path(getBulkOutputDir(), BULKLOAD_COLLECTOR_OUTPUT);
for (TableName table : tablesToBackup) {
String walDirsCsv = String.join(",", tablesToWALFileList.get(table));
Copilot AI (Oct 21, 2025):
Potential NullPointerException if tablesToWALFileList.get(table) returns null. If the table has no WAL files in the map, String.join will throw an NPE when attempting to join null.

Suggested change
String walDirsCsv = String.join(",", tablesToWALFileList.get(table));
List<String> walDirs = tablesToWALFileList.get(table);
String walDirsCsv = String.join(",", walDirs != null ? walDirs : java.util.Collections.emptyList());


Path archiveDir = HFileArchiveUtil.getStoreArchivePath(conf, srcTable, regionName, fam);
Path archive = new Path(archiveDir, filename);
List<Path> bulkloadPaths =
BulkFilesCollector.collectFromWalDirs(conf, walDirsCsv, collectorOutput, table, table,
Contributor:
Rather than calling BulkFilesCollector directly, we can use the org.apache.hadoop.hbase.backup.impl.AbstractPitrRestoreHandler#collectBulkFiles() method, which serves as a higher-level approach and internally invokes BulkFilesCollector.collectFromWalDirs(). This helps us avoid duplicating code. In both restore and incremental backup scenarios, we need to extract bulkload files by reading WAL files within a given time range, so it makes sense to have a single piece of logic for this. We should consider placing this common logic in a utility class under the util package.

Author:
BulkFilesCollector#collectFromWalDirs() is itself a utility function. I have already computed the valid WAL directories using BackupUtils#getValidWalDirs() in IncrementalTableBackupClient#convertWALsToHFiles(), so here I am reusing that. If I called AbstractPitrRestoreHandler#collectBulkFiles(), it would call BackupUtils#getValidWalDirs() again.

Contributor @anmolnar (Oct 22, 2025):
I agree with @ankitsol. This class should not make a call to an abstract class (you would have to make the method public); instead, move more logic into the utility class if you want to share more.

Contributor:
BulkFilesCollector#collectFromWalDirs() is itself a utility function. I have computed valid WAL directory using BackupUtils#getValidWalDirs() once already in IncrementalTableBackupClient#convertWALsToHFiles() so here I am reusing that. If I call AbstractPitrRestoreHandler#collectBulkFiles() it would again call BackupUtils#getValidWalDirs()

Consider passing that as a parameter. Adjust the original methods as minimally as possible to accommodate both scenarios.

This class should not make a call to an abstract class

No, as mentioned earlier, we should move the shared elements to a utility class.

Author:
Consider passing that as a parameter. Adjust the original methods as minimally as possible to accommodate both scenarios.

Please elaborate

Contributor:
If I call AbstractPitrRestoreHandler#collectBulkFiles() it would again call BackupUtils#getValidWalDirs()

Instead of calling BackupUtils#getValidWalDirs() inside AbstractPitrRestoreHandler#collectBulkFiles(), take the output of BackupUtils#getValidWalDirs() as a parameter.
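A rough sketch of that proposal, a shared helper that takes the already-computed WAL directories as input; the method name and its placement in a util class are assumptions, while the collectFromWalDirs() call mirrors the one in this diff:

// Hypothetical shared utility; not an existing API. Both restore and incremental backup
// could call it with the output of BackupUtils#getValidWalDirs(), so the directories are
// computed only once by the caller.
public static List<Path> collectBulkFiles(Configuration conf, List<String> validWalDirs,
    Path collectorOutput, TableName sourceTable, TableName targetTable, long startTs, long endTs)
    throws IOException {
  String walDirsCsv = String.join(",", validWalDirs);
  return BulkFilesCollector.collectFromWalDirs(conf, walDirsCsv, collectorOutput, sourceTable,
    targetTable, startTs, endTs);
}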

tablesToPrevBackupTs.get(table), backupInfo.getIncrCommittedWalTs());
Copilot AI (Oct 21, 2025):
Potential NullPointerException if tablesToPrevBackupTs.get(table) returns null. The map may not contain an entry for the table if no previous backup exists, which would cause an NPE when the primitive long is expected.

Suggested change
tablesToPrevBackupTs.get(table), backupInfo.getIncrCommittedWalTs());
tablesToPrevBackupTs.get(table) != null ? tablesToPrevBackupTs.get(table) : 0L, backupInfo.getIncrCommittedWalTs());


if (fs.exists(p)) {
if (LOG.isTraceEnabled()) {
LOG.trace("found bulk hfile {} in {} for {}", bulkLoad.getHfilePath(), p.getParent(),
srcTableQualifier);
LOG.trace("copying {} to {}", p, tgt);
List<String> bulkLoadFiles =
bulkloadPaths.stream().map(Path::toString).collect(Collectors.toList());

if (bulkLoadFiles.isEmpty()) {
LOG.info("No bulk-load files found for table {}", table);
} else {
mergeSplitAndCopyBulkloadedHFiles(bulkLoadFiles, table, tgtFs);
}
Contributor (on lines 208 to 211):
[nit] Using continue here would align the style with the other loop for the !backupInfo.isContinuousBackupEnabled() case.

Suggested change
if (bulkLoadFiles.isEmpty()) {
LOG.info("No bulk-load files found for table {}", table);
} else {
mergeSplitAndCopyBulkloadedHFiles(bulkLoadFiles, table, tgtFs);
}
if (bulkLoadFiles.isEmpty()) {
LOG.info("No bulk-load files found for table {}", table);
continue;
}
mergeSplitAndCopyBulkloadedHFiles(bulkLoadFiles, table, tgtFs);

bulkloadInfo.addActiveFile(p.toString());
} else if (fs.exists(archive)) {
LOG.debug("copying archive {} to {}", archive, tgt);
bulkloadInfo.addArchiveFiles(archive.toString());
}
}

for (MergeSplitBulkloadInfo bulkloadInfo : toBulkload.values()) {
mergeSplitAndCopyBulkloadedHFiles(bulkloadInfo.getActiveFiles(),
bulkloadInfo.getArchiveFiles(), bulkloadInfo.getSrcTable(), tgtFs);
}

return bulkLoads;
}

@@ -306,6 +307,9 @@ private void updateFileLists(List<String> activeFiles, List<String> archiveFiles
*/
@Override
public void execute() throws IOException, ColumnFamilyMismatchException {
// tablesToWALFileList and tablesToPrevBackupTs are needed for "continuous" Incremental backup
Map<TableName, List<String>> tablesToWALFileList = new HashMap<>();
Map<TableName, Long> tablesToPrevBackupTs = new HashMap<>();
try {
Map<TableName, String> tablesToFullBackupIds = getFullBackupIds();
verifyCfCompatibility(backupInfo.getTables(), tablesToFullBackupIds);
@@ -339,7 +343,7 @@ public void execute() throws IOException, ColumnFamilyMismatchException {
BackupUtils.copyTableRegionInfo(conn, backupInfo, conf);
setupRegionLocator();
// convert WAL to HFiles and copy them to .tmp under BACKUP_ROOT
convertWALsToHFiles();
convertWALsToHFiles(tablesToWALFileList, tablesToPrevBackupTs);
incrementalCopyHFiles(new String[] { getBulkOutputDir().toString() },
backupInfo.getBackupRootDir());
} catch (Exception e) {
@@ -371,7 +375,8 @@ public void execute() throws IOException, ColumnFamilyMismatchException {
backupManager.writeBackupStartCode(newStartCode);
}

List<BulkLoad> bulkLoads = handleBulkLoad(backupInfo.getTableNames());
List<BulkLoad> bulkLoads =
handleBulkLoad(backupInfo.getTableNames(), tablesToWALFileList, tablesToPrevBackupTs);

// backup complete
completeBackup(conn, backupInfo, BackupType.INCREMENTAL, conf);
@@ -425,10 +430,13 @@ protected void deleteBulkLoadDirectory() throws IOException {
}
}

protected void convertWALsToHFiles() throws IOException {
protected void convertWALsToHFiles(Map<TableName, List<String>> tablesToWALFileList,
Map<TableName, Long> tablesToPrevBackupTs) throws IOException {
long previousBackupTs = 0L;
long currentBackupTs = 0L;
if (backupInfo.isContinuousBackupEnabled()) {
Set<TableName> tableSet = backupInfo.getTables();
currentBackupTs = backupInfo.getIncrCommittedWalTs();
List<BackupInfo> backupInfos = backupManager.getBackupHistory(true);
for (TableName table : tableSet) {
for (BackupInfo backup : backupInfos) {
Expand All @@ -442,7 +450,9 @@ protected void convertWALsToHFiles() throws IOException {
} else {
previousBackupTs = backup.getIncrCommittedWalTs();
}
walBackupFileList = getBackupLogs(previousBackupTs);
walBackupFileList = getBackupLogs(previousBackupTs, currentBackupTs);
tablesToWALFileList.put(table, walBackupFileList);
tablesToPrevBackupTs.put(table, previousBackupTs);
walToHFiles(walBackupFileList, Arrays.asList(table.getNameAsString()),
previousBackupTs);
break;
Expand All @@ -469,7 +479,7 @@ protected void convertWALsToHFiles() throws IOException {
}
}

private List<String> getBackupLogs(long startTs) throws IOException {
private List<String> getBackupLogs(long startTs, long endTs) throws IOException {
Contributor:
Let's avoid duplicating code. We already have similar functionality for retrieving log files within a time range in org.apache.hadoop.hbase.backup.impl.AbstractPitrRestoreHandler#getValidWalDirs. Can we use that instead? We could move the file to a common location such as src/main/java/org/apache/hadoop/hbase/backup/util.
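Whichever class ends up owning it, the core of the time-range filtering is the day-directory overlap check already used below; a small sketch, with the helper name being an assumption:

// Keep a day directory when its [start-of-day, end-of-day] window overlaps [startTs, endTs].
// ONE_DAY_IN_MILLISECONDS is the constant already imported by this class.
static boolean dayDirInRange(long dirStartTime, long startTs, long endTs) {
  long dirEndTime = dirStartTime + ONE_DAY_IN_MILLISECONDS - 1;
  return dirEndTime >= startTs && dirStartTime <= endTs;
}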

// get log files from backup dir
String walBackupDir = conf.get(CONF_CONTINUOUS_BACKUP_WAL_DIR);
if (Strings.isNullOrEmpty(walBackupDir)) {
@@ -494,7 +504,7 @@ private List<String> getBackupLogs(long startTs) throws IOException {
long dirStartTime = dirDate.getTime(); // Start of that day (00:00:00)
long dirEndTime = dirStartTime + ONE_DAY_IN_MILLISECONDS - 1; // End time of day (23:59:59)

if (dirEndTime >= startTs) {
if (dirEndTime >= startTs && dirStartTime <= endTs) {
Path dirPath = dayDir.getPath();
FileStatus[] logs = backupFs.listStatus(dirPath);
for (FileStatus log : logs) {
@@ -533,11 +543,7 @@ protected void walToHFiles(List<String> dirPaths, List<String> tableList, long p
conf.set(JOB_NAME_CONF_KEY, jobname);
if (backupInfo.isContinuousBackupEnabled()) {
conf.set(WALInputFormat.START_TIME_KEY, Long.toString(previousBackupTs));
// committedWALsTs is needed only for Incremental backups with continuous backup
// since these do not depend on log roll ts
long committedWALsTs = BackupUtils.getReplicationCheckpoint(conn);
backupInfo.setIncrCommittedWalTs(committedWALsTs);
conf.set(WALInputFormat.END_TIME_KEY, Long.toString(committedWALsTs));
conf.set(WALInputFormat.END_TIME_KEY, Long.toString(backupInfo.getIncrCommittedWalTs()));
}
String[] playerArgs = { dirs, StringUtils.join(tableList, ",") };

@@ -38,6 +38,7 @@
import org.apache.hadoop.hbase.backup.BackupType;
import org.apache.hadoop.hbase.backup.HBackupFileSystem;
import org.apache.hadoop.hbase.backup.impl.BackupManifest.BackupImage;
import org.apache.hadoop.hbase.backup.util.BackupUtils;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.util.CommonFSUtils;
@@ -113,6 +114,12 @@ protected void beginBackup(BackupManager backupManager, BackupInfo backupInfo)
// set the start timestamp of the overall backup
long startTs = EnvironmentEdgeManager.currentTime();
backupInfo.setStartTs(startTs);
if (backupInfo.getType() == BackupType.INCREMENTAL && backupInfo.isContinuousBackupEnabled()) {
Contributor:
Why was this logic added to TableBackupClient? Wouldn't it be more appropriate to place it in IncrementalTableBackupClient?
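A sketch of the alternative being asked about, computing the committed WAL timestamp from IncrementalTableBackupClient (for example at the start of execute()) rather than in the shared beginBackup(); the placement is only a suggestion, and the two calls are the ones added in this PR:

// In IncrementalTableBackupClient the backup type is already incremental, so only the
// continuous-backup check is needed before fetching the replication checkpoint.
if (backupInfo.isContinuousBackupEnabled()) {
  long committedWALsTs = BackupUtils.getReplicationCheckpoint(conn);
  backupInfo.setIncrCommittedWalTs(committedWALsTs);
}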

// committedWALsTs is needed only for Incremental backups with continuous backup
// since these do not depend on log roll ts
long committedWALsTs = BackupUtils.getReplicationCheckpoint(conn);
backupInfo.setIncrCommittedWalTs(committedWALsTs);
}
// set overall backup status: ongoing
backupInfo.setState(BackupState.RUNNING);
backupInfo.setPhase(BackupPhase.REQUEST);
@@ -75,7 +75,7 @@ public class BulkLoadCollectorJob extends Configured implements Tool {
public BulkLoadCollectorJob() {
}

protected BulkLoadCollectorJob(final Configuration c) {
public BulkLoadCollectorJob(final Configuration c) {
super(c);
}

@@ -175,7 +175,7 @@ public void execute() throws IOException {
// copy out the table and region info files for each table
BackupUtils.copyTableRegionInfo(conn, backupInfo, conf);
// convert WAL to HFiles and copy them to .tmp under BACKUP_ROOT
convertWALsToHFiles();
convertWALsToHFiles(new HashMap<>(), new HashMap<>());
incrementalCopyHFiles(new String[] { getBulkOutputDir().toString() },
backupInfo.getBackupRootDir());
failStageIf(Stage.stage_2);
@@ -200,7 +200,7 @@ public void execute() throws IOException {
BackupUtils.getMinValue(BackupUtils.getRSLogTimestampMins(newTableSetTimestampMap));
backupManager.writeBackupStartCode(newStartCode);

handleBulkLoad(backupInfo.getTableNames());
handleBulkLoad(backupInfo.getTableNames(), new HashMap<>(), new HashMap<>());
failStageIf(Stage.stage_4);

// backup complete
@@ -163,20 +163,18 @@ public void testIncrementalBackupCopyingBulkloadTillIncrCommittedWalTs() throws
performBulkLoad("bulkPreIncr", methodName, tableName1);
expectedRowCount += ROWS_IN_BULK_LOAD;
assertEquals(expectedRowCount, TEST_UTIL.countRows(tableName1));
assertEquals(1, systemTable.readBulkloadRows(List.of(tableName1)).size());
assertTrue(systemTable.readBulkloadRows(List.of(tableName1)).isEmpty());
loadTable(TEST_UTIL.getConnection().getTable(tableName1));
Thread.sleep(15000);

performBulkLoad("bulkPostIncr", methodName, tableName1);
assertEquals(2, systemTable.readBulkloadRows(List.of(tableName1)).size());
assertTrue(systemTable.readBulkloadRows(List.of(tableName1)).isEmpty());

// Incremental backup
String backup2 =
backupTables(BackupType.INCREMENTAL, List.of(tableName1), BACKUP_ROOT_DIR, true);
assertTrue(checkSucceeded(backup2));

// bulkPostIncr Bulkload entry should not be deleted post incremental backup
assertEquals(1, systemTable.readBulkloadRows(List.of(tableName1)).size());
assertTrue(systemTable.readBulkloadRows(List.of(tableName1)).isEmpty());

TEST_UTIL.truncateTable(tableName1);
// Restore incremental backup