@@ -39,7 +39,7 @@
 public class BlockDirectory extends FilterDirectory implements ShutdownAwareDirectory {
   private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
 
-  public static final long BLOCK_SHIFT = Integer.getInteger("solr.hdfs.blockcache.blockshift", 13);
+  public static final long BLOCK_SHIFT = Integer.getInteger("solr.blockcache.blockshift", 13);
 
   public static final int BLOCK_SIZE = 1 << BLOCK_SHIFT;
   public static final long BLOCK_MOD = BLOCK_SIZE - 1L;
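Because BLOCK_SIZE is a power of two, BLOCK_SHIFT and BLOCK_MOD let a file position be split into a block id and an in-block offset with a shift and a mask instead of a division and a modulo. A minimal standalone sketch of that arithmetic with illustrative values (not the BlockDirectory code itself):

public class BlockMathSketch {
  public static void main(String[] args) {
    final long blockShift = 13;              // matches the default above
    final long blockSize = 1L << blockShift; // 8192 bytes per block
    final long blockMod = blockSize - 1L;    // 8191, a 13-bit mask
    long pos = 20_000L;                      // an arbitrary position within a file
    long blockId = pos >>> blockShift;       // 20000 / 8192 = block 2
    long inBlock = pos & blockMod;           // 20000 % 8192 = offset 3616
    System.out.println("block " + blockId + ", offset " + inBlock);
  }
}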
@@ -28,7 +28,7 @@
 public abstract class CustomBufferedIndexInput extends IndexInput {
 
   public static final int BUFFER_SIZE =
-      Integer.getInteger("solr.hdfs.readbuffer.size.default", 32768);
+      Integer.getInteger("solr.blockcache.readbuffer.size.default", 32768);
 
   private int bufferSize = BUFFER_SIZE;
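Only the property name changes here; the lookup behavior does not. A standalone sketch (not Solr code) showing how Integer.getInteger consults a JVM system property and falls back to the supplied default when the renamed property is unset:

public class BufferSizeSketch {
  public static void main(String[] args) {
    // Property unset: the supplied default wins.
    System.out.println(Integer.getInteger("solr.blockcache.readbuffer.size.default", 32768)); // 32768
    // Normally set via -Dsolr.blockcache.readbuffer.size.default=65536 on the JVM command line.
    System.setProperty("solr.blockcache.readbuffer.size.default", "65536");
    System.out.println(Integer.getInteger("solr.blockcache.readbuffer.size.default", 32768)); // 65536
  }
}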
4 changes: 2 additions & 2 deletions solr/core/src/java/org/apache/solr/blockcache/Metrics.java
@@ -157,12 +157,12 @@ public void close() {
 
   @Override
   public String getName() {
-    return "hdfsBlockCache";
+    return "blockCache";
   }
 
   @Override
   public String getDescription() {
-    return "Provides metrics for the HdfsDirectoryFactory BlockCache.";
+    return "Provides metrics for the BlockCache.";
   }
 
   @Override
@@ -154,8 +154,8 @@ private void moveReplica(
         replica.getBool(ZkStateReader.SHARED_STORAGE_PROP, false) && dataDir != null;
 
     if (isSharedFS && inPlaceMove) {
-      log.debug("-- moveHdfsReplica");
-      moveHdfsReplica(
+      log.debug("-- moveSharedFsReplica");
+      moveSharedFsReplica(
           clusterState,
           results,
           dataDir.toString(),
@@ -179,7 +179,7 @@ private void moveReplica(
     }
   }
 
-  private void moveHdfsReplica(
+  private void moveSharedFsReplica(
       ClusterState clusterState,
       NamedList<Object> results,
       String dataDir,
5 changes: 3 additions & 2 deletions solr/core/src/java/org/apache/solr/update/UpdateLog.java
@@ -446,6 +446,7 @@ public void init(UpdateHandler uhandler, SolrCore core) {
       // `init(UpdateHandler, SolrCore)` is never actually called concurrently in application code
       // (`TestHdfsUpdateLog.testFSThreadSafety()`, introduced by SOLR-7113, seems to be the only
       // place that requires true thread safety from this method?).
+      // HDFS was removed in Solr 10, and therefore the test referenced is gone as well.
       if (debug) {
         log.debug(
             "UpdateHandler init: tlogDir={}, next id={} this is a reopen or double init ... nothing else to do.",
@@ -509,8 +510,8 @@ protected final void maybeClearLog(SolrCore core) {
 
   /**
    * Resolves any relative path wrt the highest core-scoped level (whatever that means for a
-   * particular implementation). For most filesystems, this will be the core instanceDir, but there
-   * are other cases; e.g., HdfsUpdateLog will resolve paths relative to the core dataDir.
+   * particular implementation). For most filesystems, this will be the core instanceDir, but that
+   * is not a hard and fast rule.
    *
    * <p>If the input path is already absolute, it will be returned unmodified.
    *
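The contract the javadoc above describes is simple enough to sketch. The helper below is an assumed illustration of that contract, not the actual UpdateLog implementation:

import java.nio.file.Path;

public class ResolvePathSketch {
  // Absolute inputs pass through unchanged; relative inputs resolve against
  // whatever base the implementation treats as its highest core-scoped level.
  static Path resolveCoreScoped(Path baseDir, Path input) {
    return input.isAbsolute() ? input : baseDir.resolve(input);
  }

  public static void main(String[] args) {
    Path instanceDir = Path.of("/var/solr/data/mycore"); // hypothetical instanceDir
    System.out.println(resolveCoreScoped(instanceDir, Path.of("tlog")));       // /var/solr/data/mycore/tlog
    System.out.println(resolveCoreScoped(instanceDir, Path.of("/mnt/tlogs"))); // /mnt/tlogs
  }
}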
@@ -1378,7 +1378,7 @@ private void zkCheck(UpdateCommand updateCommand) {
 
     // Streaming updates can delay shutdown and cause big update reorderings (new streams can't be
     // initiated, but existing streams carry on). This is why we check if the CC is shutdown.
-    // See SOLR-8203 and loop HdfsChaosMonkeyNothingIsSafeTest (and check for inconsistent shards)
+    // See SOLR-8203 and loop ChaosMonkeyNothingIsSafeTest (and check for inconsistent shards)
     // to test.
     if (req.getCoreContainer().isShutDown()) {
       throw new SolrException(
@@ -142,10 +142,10 @@ public void testEOF() throws IOException {
     String name = "test.eof";
     createFile(name, fsDir, directory);
     long fsLength = fsDir.fileLength(name);
-    long hdfsLength = directory.fileLength(name);
-    assertEquals(fsLength, hdfsLength);
+    long blockLength = directory.fileLength(name);
+    assertEquals(fsLength, blockLength);
     testEof(name, fsDir, fsLength);
-    testEof(name, directory, hdfsLength);
+    testEof(name, directory, blockLength);
     fsDir.close();
   }

@@ -189,11 +189,12 @@ public void testRandomAccessWritesLargeCache() throws IOException {
     testRandomAccessWrites();
   }
 
-  private void assertInputsEquals(String name, Directory fsDir, Directory hdfs) throws IOException {
+  private void assertInputsEquals(String name, Directory fsDir, Directory blockDirectory)
+      throws IOException {
     int reads = random.nextInt(MAX_NUMBER_OF_READS);
     IndexInput fsInput = fsDir.openInput(name, IOContext.DEFAULT);
-    IndexInput hdfsInput = hdfs.openInput(name, IOContext.DEFAULT);
-    assertEquals(fsInput.length(), hdfsInput.length());
+    IndexInput blockInput = blockDirectory.openInput(name, IOContext.DEFAULT);
+    assertEquals(fsInput.length(), blockInput.length());
     int fileLength = (int) fsInput.length();
     for (int i = 0; i < reads; i++) {
       int rnd;
@@ -204,7 +205,7 @@ private void assertInputsEquals(String name, Directory fsDir, Directory hdfs) throws IOException {
       }
 
       byte[] fsBuf = new byte[rnd + MIN_BUFFER_SIZE];
-      byte[] hdfsBuf = new byte[fsBuf.length];
+      byte[] blockBuf = new byte[fsBuf.length];
       int offset = random.nextInt(fsBuf.length);
       int length = random.nextInt(fsBuf.length - offset);

@@ -217,23 +218,24 @@ private void assertInputsEquals(String name, Directory fsDir, Directory hdfs) throws IOException {
 
       fsInput.seek(pos);
       fsInput.readBytes(fsBuf, offset, length);
-      hdfsInput.seek(pos);
-      hdfsInput.readBytes(hdfsBuf, offset, length);
+      blockInput.seek(pos);
+      blockInput.readBytes(blockBuf, offset, length);
       for (int f = offset; f < length; f++) {
-        if (fsBuf[f] != hdfsBuf[f]) {
+        if (fsBuf[f] != blockBuf[f]) {
           fail("read [" + i + "]");
         }
       }
     }
     fsInput.close();
-    hdfsInput.close();
+    blockInput.close();
   }
 
-  private void createFile(String name, Directory fsDir, Directory hdfs) throws IOException {
+  private void createFile(String name, Directory fsDir, Directory blockDirectory)
+      throws IOException {
     int writes = random.nextInt(MAX_NUMBER_OF_WRITES);
     int fileLength = random.nextInt(MAX_FILE_SIZE - MIN_FILE_SIZE) + MIN_FILE_SIZE;
     IndexOutput fsOutput = fsDir.createOutput(name, IOContext.DEFAULT);
-    IndexOutput hdfsOutput = hdfs.createOutput(name, IOContext.DEFAULT);
+    IndexOutput blockOutput = blockDirectory.createOutput(name, IOContext.DEFAULT);
     for (int i = 0; i < writes; i++) {
       byte[] buf =
           new byte
@@ -243,10 +245,10 @@ private void createFile(String name, Directory fsDir, Directory hdfs) throws IOException {
       int offset = random.nextInt(buf.length);
       int length = random.nextInt(buf.length - offset);
       fsOutput.writeBytes(buf, offset, length);
-      hdfsOutput.writeBytes(buf, offset, length);
+      blockOutput.writeBytes(buf, offset, length);
     }
     fsOutput.close();
-    hdfsOutput.close();
+    blockOutput.close();
   }
 
   private String getName() {