diff --git a/gradle.properties b/gradle.properties
index a83fa4d..1f47821 100644
--- a/gradle.properties
+++ b/gradle.properties
@@ -5,7 +5,7 @@ junitJupiterVersion=5.4.2
mockitoVersion=2.27.0
solrVersion=6.6.5
assVersion=2.0.8.2
-amazonVersion=1.12.782
+amazonVersion=2.35.7
jaxBVersion=4.0.5
restAssuredVersion=5.5.1
awaitablityVersion=4.3.0
diff --git a/integration-tests/build.gradle b/integration-tests/build.gradle
index c0c400c..f0183d6 100644
--- a/integration-tests/build.gradle
+++ b/integration-tests/build.gradle
@@ -28,11 +28,15 @@ dependencies {
testImplementation "org.junit.jupiter:junit-jupiter-params:${junitJupiterVersion}"
testImplementation "org.awaitility:awaitility:${awaitablityVersion}"
- testImplementation platform("com.amazonaws:aws-java-sdk-bom:${amazonVersion}")
+ testImplementation platform("software.amazon.awssdk:bom:${amazonVersion}")
- testImplementation('com.amazonaws:aws-java-sdk-core')
- testImplementation('com.amazonaws:aws-java-sdk-s3')
- testImplementation("com.amazonaws:aws-java-sdk-sts")
+ testImplementation('software.amazon.awssdk:aws-core')
+ testImplementation('software.amazon.awssdk:s3')
+ testImplementation("software.amazon.awssdk:sts")
+
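+ // SDK v2 HTTP transports are pluggable: `apache-client` backs the synchronous S3Client
+ // used in these tests; `netty-nio-client` would only be needed for an async client.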
+ testImplementation "software.amazon.awssdk:apache-client:${amazonVersion}"
+
+ testImplementation "software.amazon.awssdk:netty-nio-client:${amazonVersion}"
testRuntimeOnly "org.glassfish.jaxb:jaxb-runtime:${jaxBVersion}"
}
diff --git a/integration-tests/src/test/java/eu/xenit/solr/backup/s3/SolrBackupTest.java b/integration-tests/src/test/java/eu/xenit/solr/backup/s3/SolrBackupTest.java
index e420129..314af03 100644
--- a/integration-tests/src/test/java/eu/xenit/solr/backup/s3/SolrBackupTest.java
+++ b/integration-tests/src/test/java/eu/xenit/solr/backup/s3/SolrBackupTest.java
@@ -1,13 +1,5 @@
package eu.xenit.solr.backup.s3;
-import com.amazonaws.ClientConfiguration;
-import com.amazonaws.Protocol;
-import com.amazonaws.auth.AWSStaticCredentialsProvider;
-import com.amazonaws.auth.BasicAWSCredentials;
-import com.amazonaws.client.builder.AwsClientBuilder;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
-import com.amazonaws.services.s3.model.ObjectListing;
import groovy.util.logging.Slf4j;
import io.restassured.RestAssured;
import io.restassured.builder.RequestSpecBuilder;
@@ -20,7 +12,20 @@
import org.junit.jupiter.api.Order;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.TestMethodOrder;
-
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.awscore.client.builder.AwsSyncClientBuilder;
+import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
+import software.amazon.awssdk.http.apache.ApacheHttpClient;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.S3ClientBuilder;
+import software.amazon.awssdk.services.s3.S3Configuration;
+import software.amazon.awssdk.services.s3.model.ListObjectsRequest;
+import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
+
+import java.net.URI;
+import java.net.URISyntaxException;
import java.util.concurrent.TimeUnit;
import static io.restassured.RestAssured.given;
@@ -37,11 +42,11 @@ class SolrBackupTest {
static RequestSpecification specBackupDetails;
static RequestSpecification specRestore;
static RequestSpecification specRestoreStatus;
- static AmazonS3 s3Client;
+ static S3Client s3Client;
static final String BUCKET = "bucket";
@BeforeEach
- public void setup() {
+ public void setup() throws URISyntaxException {
String basePathSolr = "solr/alfresco";
String basePathSolrBackup = "solr/alfresco/replication";
String solrHost = System.getProperty("solr.host", "localhost");
@@ -148,14 +153,22 @@ void testBackupWithNumberToLiveEndpoint() {
void validateSnapshotCount(long count) {
await().atMost(180, TimeUnit.SECONDS)
- .until(() -> s3Client.listObjects(BUCKET)
- .getObjectSummaries()
+ /*
+ * SDK v2 Migration:
+ * - Switched to `ListObjectsV2Request` for the S3 call.
+ * - `contents()` on the response replaces the v1 `getObjectSummaries()`.
+ * - Entries are `S3Object` instances; `size()` and `key()` replace the v1 `getSize()` and `getKey()`.
+ */
+ .until(() -> s3Client.listObjectsV2(ListObjectsV2Request.builder().bucket(BUCKET)
+ .build())
+ .contents()
.stream()
- .filter(s3ObjectSummary -> s3ObjectSummary.getSize() == 0
- && s3ObjectSummary.getKey().contains("snapshot"))
+ .filter(s3Object -> s3Object.size() == 0
+ && s3Object.key().contains("snapshot"))
.count() == count);
}
+
private void callBackupEndpoint() {
callBackupEndpoint(0);
}
@@ -187,13 +200,30 @@ private void callBackupEndpoint(int count) {
});
}
- private AmazonS3 createInternalClient(
- String region, String endpoint, String accessKey, String secretKey) {
- ClientConfiguration clientConfig = new ClientConfiguration().withProtocol(Protocol.HTTPS);
- AmazonS3ClientBuilder clientBuilder = AmazonS3ClientBuilder.standard().withClientConfiguration(clientConfig);
- clientBuilder.withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKey, secretKey)));
- clientBuilder.setEndpointConfiguration(new AwsClientBuilder.EndpointConfiguration(endpoint, region));
- clientBuilder.withPathStyleAccessEnabled(true);
+ private S3Client createInternalClient(
+ String region, String endpoint, String accessKey, String secretKey) throws URISyntaxException {
+ // SDK v2 Migration: Removed explicit protocol setting, as it's inferred from the endpoint URI.
+ ClientOverrideConfiguration clientConfig = ClientOverrideConfiguration.builder().build();
+
+ S3Configuration configuration = S3Configuration.builder()
+ .build();
+
+ S3ClientBuilder clientBuilder = S3Client.builder()
+ .httpClientBuilder(ApacheHttpClient.builder()).overrideConfiguration(clientConfig)
+ .serviceConfiguration(configuration);
+ clientBuilder.credentialsProvider(StaticCredentialsProvider.create(AwsBasicCredentials.create(accessKey, secretKey)));
+
+ /*
+ * SDK v2 Migration:
+ * - Replaced the v1 `setEndpointConfiguration` with `endpointOverride` and `region`.
+ * - `endpointOverride` takes a URI object.
+ * - `region` must be set separately.
+ */
+ clientBuilder.endpointOverride(new URI(endpoint));
+ clientBuilder.region(Region.of(region));
+
+ // SDK v2 Migration: Replaced `pathStyleAccessEnabled(true)` with `forcePathStyle(true)`.
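+ // Path-style addressing (endpoint/bucket/key) is the usual choice for endpoint overrides such as LocalStack, where per-bucket DNS is typically unavailable.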
+ clientBuilder.forcePathStyle(true);
return clientBuilder.build();
}
diff --git a/integration-tests/src/test/resources/compose/docker-compose.yml b/integration-tests/src/test/resources/compose/docker-compose.yml
index dec52eb..6b2708f 100644
--- a/integration-tests/src/test/resources/compose/docker-compose.yml
+++ b/integration-tests/src/test/resources/compose/docker-compose.yml
@@ -45,6 +45,7 @@ services:
- S3_ACCESS_KEY=access_key
- S3_SECRET_KEY=secret_key
- S3_PATH_STYLE_ACCESS_ENABLED=true
+ - S3_CLIENT_CHECKSUM_VALIDATION_ENABLED=false
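+ # The v2 client validates response checksums by default; LocalStack does not always return them, so validation is disabled for these tests.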
localstack:
container_name: localstack
@@ -57,6 +58,6 @@ services:
- AWS_SECRET_ACCESS_KEY=secret_key
- AWS_DEFAULT_REGION=us-east-1
volumes:
- - ./aws:/etc/localstack/init/ready.d
+ - ./aws:/etc/localstack/init/ready.d:ro,Z
diff --git a/integration-tests/src/test/resources/solr.xml b/integration-tests/src/test/resources/solr.xml
index 3894bd6..d7532a9 100644
--- a/integration-tests/src/test/resources/solr.xml
+++ b/integration-tests/src/test/resources/solr.xml
@@ -10,7 +10,10 @@
<str name="s3.secret.key">${S3_SECRET_KEY:}</str>
<str name="s3.proxy.host">${S3_PROXY_HOST:}</str>
<int name="s3.proxy.port">${S3_PROXY_PORT:0}</int>
+
<bool name="s3.path.style.access.enabled">${S3_PATH_STYLE_ACCESS_ENABLED:false}</bool>
+ <bool name="s3.client.checksum.validation.enabled">${S3_CLIENT_CHECKSUM_VALIDATION_ENABLED:true}</bool>
+ <int name="s3.client.progressLogByteInterval">${S3_CLIENT_PROGRESS_LOG_BYTE_INTERVAL:4194304}</int>
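+ <!-- s3.client.progressLogByteInterval is measured in bytes: 4194304 = 4 MiB between progress log lines. -->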
diff --git a/solr-backup/build.gradle b/solr-backup/build.gradle
index 46d8c54..ead38cb 100644
--- a/solr-backup/build.gradle
+++ b/solr-backup/build.gradle
@@ -20,11 +20,17 @@ dependencies {
}
compileOnly "org.alfresco:alfresco-search:${assVersion}"
- implementation platform("com.amazonaws:aws-java-sdk-bom:${amazonVersion}")
+ implementation platform("software.amazon.awssdk:bom:${amazonVersion}")
- implementation('com.amazonaws:aws-java-sdk-core')
- implementation('com.amazonaws:aws-java-sdk-s3')
- implementation("com.amazonaws:aws-java-sdk-sts")
+ implementation('ch.qos.logback:logback-classic:1.4.14')
+ compileOnly 'org.projectlombok:lombok:1.18.32'
+ annotationProcessor 'org.projectlombok:lombok:1.18.32'
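+ // Lombok supplies the @Getter and @NonNull annotations used by the S3 backup classes.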
+
+ implementation('software.amazon.awssdk:aws-core')
+ implementation('software.amazon.awssdk:s3')
+ implementation("software.amazon.awssdk:sts")
+ implementation "software.amazon.awssdk:apache-client:${amazonVersion}"
+ implementation "software.amazon.awssdk:netty-nio-client:${amazonVersion}"
testImplementation("org.apache.solr:solr-core:${solrVersion}") {
exclude group: 'org.restlet.jee' // Only available in JCenter, not essential in this project.
}
diff --git a/solr-backup/src/main/java/eu/xenit/solr/backup/s3/ProgressTrackingInputStream.java b/solr-backup/src/main/java/eu/xenit/solr/backup/s3/ProgressTrackingInputStream.java
new file mode 100644
index 0000000..6c3fd39
--- /dev/null
+++ b/solr-backup/src/main/java/eu/xenit/solr/backup/s3/ProgressTrackingInputStream.java
@@ -0,0 +1,52 @@
+package eu.xenit.solr.backup.s3;
+
+import lombok.NonNull;
+
+import java.io.FilterInputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.util.function.Consumer;
+
+/**
+ * Custom InputStream wrapper that reports the number of bytes read to a listener.
+ */
+public class ProgressTrackingInputStream extends FilterInputStream {
+
+ private final @NonNull Consumer<Long> listener;
+ private long bytesRead = 0;
+
+ /**
+ * Creates a {@code FilterInputStream}
+ * by assigning the argument {@code in}
+ * to the field {@code this.in} so as
+ * to remember it for later use.
+ *
+ * @param in the underlying input stream, or {@code null} if
+ * this instance is to be created without an underlying stream.
+ * @param listener the listener that receives the cumulative number of bytes read so far
+ */
+ protected ProgressTrackingInputStream(InputStream in, @NonNull Consumer<Long> listener) {
+ super(in);
+ this.listener = listener;
+ }
+
+ @Override
+ public int read() throws IOException {
+ int b = super.read();
+ if (b != -1) {
+ bytesRead += 1;
+ listener.accept(bytesRead);
+ }
+ return b;
+ }
+
+ @Override
+ public int read(byte[] b, int off, int len) throws IOException {
+ int bytes = super.read(b, off, len);
+ if (bytes > 0) {
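+ // Report the cumulative total read so far, not the size of this read; the listener applies its own threshold.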
+ bytesRead += bytes;
+ listener.accept(bytesRead);
+ }
+ return bytes;
+ }
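+
+ // Usage sketch (hypothetical): wrap any stream to observe cumulative progress, e.g.
+ //   new ProgressTrackingInputStream(in, total -> log.debug("{} bytes read", total));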
+}
diff --git a/solr-backup/src/main/java/eu/xenit/solr/backup/s3/S3BackupRepository.java b/solr-backup/src/main/java/eu/xenit/solr/backup/s3/S3BackupRepository.java
index dbfea7f..c4ff639 100644
--- a/solr-backup/src/main/java/eu/xenit/solr/backup/s3/S3BackupRepository.java
+++ b/solr-backup/src/main/java/eu/xenit/solr/backup/s3/S3BackupRepository.java
@@ -66,12 +66,16 @@ public void init(NamedList args) {
this.config = args;
S3BackupRepositoryConfig backupConfig = new S3BackupRepositoryConfig(this.config);
- // If a client was already created, close it to avoid any resource leak
+ // If a client was already created, close it to avoid any resource leak
if (client != null) {
client.close();
}
- this.client = backupConfig.buildClient();
+ try {
+ this.client = backupConfig.buildClient();
+ } catch (URISyntaxException e) {
+ throw new RuntimeException(e);
+ }
}
@Override
diff --git a/solr-backup/src/main/java/eu/xenit/solr/backup/s3/S3BackupRepositoryConfig.java b/solr-backup/src/main/java/eu/xenit/solr/backup/s3/S3BackupRepositoryConfig.java
index 5cb0428..6ac4721 100644
--- a/solr-backup/src/main/java/eu/xenit/solr/backup/s3/S3BackupRepositoryConfig.java
+++ b/solr-backup/src/main/java/eu/xenit/solr/backup/s3/S3BackupRepositoryConfig.java
@@ -16,31 +16,48 @@
*/
package eu.xenit.solr.backup.s3;
+import lombok.Getter;
import org.apache.solr.common.util.NamedList;
+import java.net.URISyntaxException;
+
/**
* Class representing the {@code backup} S3 config bundle specified in solr.xml. All user-provided
* config can be overridden via environment variables (use uppercase, with '_' instead of '.'), see
* {@link S3BackupRepositoryConfig#toEnvVar}.
*/
+@Getter
public class S3BackupRepositoryConfig {
public static final String S3_BUCKET_NAME = "s3.bucket.name";
public static final String S3_REGION = "s3.region";
public static final String S3_ACCESS_KEY = "s3.access.key";
public static final String S3_SECRET_KEY = "s3.secret.key";
public static final String S3_ENDPOINT = "s3.endpoint";
+ public static final String S3_PATH_STYLE_ACCESS_ENABLED = "s3.path.style.access.enabled";
+ public static final String S3_CLIENT_CHECKSUM_VALIDATION_ENABLED = "s3.client.checksum.validation.enabled";
public static final String S3_PROXY_HOST = "s3.proxy.host";
public static final String S3_PROXY_PORT = "s3.proxy.port";
- public static final String S3_PATH_STYLE_ACCESS_ENABLED = "s3.path.style.access.enabled";
+ public static final String S3_CLIENT_PROGRESS_LOG_BYTE_INTERVAL = "s3.client.progressLogByteInterval";
private final String bucketName;
+
private final String region;
+
private final String accessKey;
+
private final String secretKey;
+
private final String proxyHost;
+
private final int proxyPort;
+
private final String endpoint;
- private final Boolean pathStyleAccessEnabled;
+
+ private final boolean pathStyleAccessEnabled;
+
+ private final boolean checksumValidationEnabled;
+
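+ // Minimum number of bytes between successive upload-progress log lines.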
+ private final int progressLogByteInterval;
public S3BackupRepositoryConfig(NamedList<?> config) {
@@ -52,13 +69,15 @@ public S3BackupRepositoryConfig(NamedList<?> config) {
accessKey = getStringConfig(config, S3_ACCESS_KEY);
secretKey = getStringConfig(config, S3_SECRET_KEY);
pathStyleAccessEnabled = getBooleanConfig(config, S3_PATH_STYLE_ACCESS_ENABLED);
+ checksumValidationEnabled = getBooleanConfig(config, S3_CLIENT_CHECKSUM_VALIDATION_ENABLED);
+ progressLogByteInterval = getIntConfig(config, S3_CLIENT_PROGRESS_LOG_BYTE_INTERVAL);
}
/**
* @return a {@link S3StorageClient} from the provided config.
*/
- public S3StorageClient buildClient() {
- return new S3StorageClient(bucketName, region, proxyHost, proxyPort, endpoint, accessKey, secretKey, pathStyleAccessEnabled);
+ public S3StorageClient buildClient() throws URISyntaxException {
+ return new S3StorageClient(this);
}
private static String getStringConfig(NamedList<?> config, String property) {
diff --git a/solr-backup/src/main/java/eu/xenit/solr/backup/s3/S3OutputStream.java b/solr-backup/src/main/java/eu/xenit/solr/backup/s3/S3OutputStream.java
index c3c1edd..6ffe5f7 100644
--- a/solr-backup/src/main/java/eu/xenit/solr/backup/s3/S3OutputStream.java
+++ b/solr-backup/src/main/java/eu/xenit/solr/backup/s3/S3OutputStream.java
@@ -16,25 +16,27 @@
*/
package eu.xenit.solr.backup.s3;
-import com.amazonaws.AmazonClientException;
-import com.amazonaws.event.ProgressEvent;
-import com.amazonaws.event.SyncProgressListener;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
-import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
-import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
-import com.amazonaws.services.s3.model.ObjectMetadata;
-import com.amazonaws.services.s3.model.PartETag;
-import com.amazonaws.services.s3.model.UploadPartRequest;
+import software.amazon.awssdk.core.sync.RequestBody;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.core.exception.SdkException;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.model.AbortMultipartUploadRequest;
+import software.amazon.awssdk.services.s3.model.CompleteMultipartUploadRequest;
+import software.amazon.awssdk.services.s3.model.CompletedMultipartUpload;
+import software.amazon.awssdk.services.s3.model.CompletedPart;
+import software.amazon.awssdk.services.s3.model.CreateMultipartUploadRequest;
+import software.amazon.awssdk.services.s3.model.UploadPartRequest;
+import software.amazon.awssdk.services.s3.model.UploadPartResponse;
import java.io.ByteArrayInputStream;
import java.io.IOException;
+import java.io.InputStream;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
+import java.util.function.Consumer;
/**
* Implementation is adapted from
@@ -52,25 +54,23 @@ public class S3OutputStream extends OutputStream {
static final int PART_SIZE = 16777216;
static final int MIN_PART_SIZE = 5242880;
- private final AmazonS3 s3Client;
- private final String bucketName;
+ private final S3Client s3Client;
private final String key;
- private final SyncProgressListener progressListener;
private volatile boolean closed;
private final ByteBuffer buffer;
private MultipartUpload multiPartUpload;
+ private final S3BackupRepositoryConfig configuration;
- public S3OutputStream(AmazonS3 s3Client, String key, String bucketName) {
+ public S3OutputStream(S3Client s3Client, String key, S3BackupRepositoryConfig configuration) {
this.s3Client = s3Client;
- this.bucketName = bucketName;
+ this.configuration = configuration;
this.key = key;
this.closed = false;
this.buffer = ByteBuffer.allocate(PART_SIZE);
- this.progressListener = new ConnectProgressListener();
this.multiPartUpload = null;
if (log.isDebugEnabled()) {
- log.debug("Created S3OutputStream for bucketName '{}' key '{}'", bucketName, key);
+ log.debug("Created S3OutputStream for bucketName '{}' key '{}'", this.configuration.getBucketName(), key);
}
}
@@ -130,17 +130,17 @@ private void uploadPart(boolean isLastPart) throws IOException {
if (multiPartUpload == null) {
if (log.isDebugEnabled()) {
- log.debug("New multi-part upload for bucketName '{}' key '{}'", bucketName, key);
+ log.debug("New multi-part upload for bucketName '{}' key '{}'", this.configuration.getBucketName(), key);
}
multiPartUpload = newMultipartUpload();
}
try {
- multiPartUpload.uploadPart(new ByteArrayInputStream(buffer.array()), size, isLastPart);
+ multiPartUpload.uploadPart(new ByteArrayInputStream(buffer.array()), size);
} catch (Exception e) {
if (multiPartUpload != null) {
multiPartUpload.abort();
if (log.isDebugEnabled()) {
- log.debug("Multipart upload aborted for bucketName '{}' key '{}'.", bucketName, key);
+ log.debug("Multipart upload aborted for bucketName '{}' key '{}'.", this.configuration.getBucketName(), key);
}
}
throw new S3Exception("Part upload failed: ", e);
@@ -181,28 +181,21 @@ public void close() throws IOException {
}
private MultipartUpload newMultipartUpload() throws IOException {
- InitiateMultipartUploadRequest initRequest =
- new InitiateMultipartUploadRequest(bucketName, key, new ObjectMetadata());
-
+ CreateMultipartUploadRequest initRequest =
+ CreateMultipartUploadRequest.builder()
+ .bucket(this.configuration.getBucketName())
+ .key(key)
+ .build();
try {
- return new MultipartUpload(s3Client.initiateMultipartUpload(initRequest).getUploadId());
- } catch (AmazonClientException e) {
+ return new MultipartUpload(s3Client.createMultipartUpload(initRequest).uploadId());
+ } catch (SdkException e) {
throw S3StorageClient.handleAmazonException(e);
}
}
- // Placeholder listener for now, just logs the event progress.
- private static class ConnectProgressListener extends SyncProgressListener {
- public void progressChanged(ProgressEvent progressEvent) {
- if (log.isDebugEnabled()) {
- log.debug("Progress event {}", progressEvent);
- }
- }
- }
-
private class MultipartUpload {
private final String uploadId;
- private final List<PartETag> partETags;
+ private final List<CompletedPart> partETags;
public MultipartUpload(String uploadId) {
this.uploadId = uploadId;
@@ -210,30 +203,55 @@ public MultipartUpload(String uploadId) {
if (log.isDebugEnabled()) {
log.debug(
"Initiated multi-part upload for bucketName '{}' key '{}' with id '{}'",
- bucketName,
+ configuration.getBucketName(),
key,
uploadId);
}
}
- void uploadPart(ByteArrayInputStream inputStream, int partSize, boolean isLastPart) {
+ void uploadPart(ByteArrayInputStream inputStream, int partSize) {
int currentPartNumber = partETags.size() + 1;
+ /*
+ * SDK v2 Migration:
+ * - Use RequestBody.fromInputStream to stream data.
+ * - The partSize is now a parameter of RequestBody.fromInputStream.
+ * - Dropped the v1-only builder methods `inputStream`, `partSize`, `lastPart` and `generalProgressListener`,
+ *   which have no equivalents on the v2 `UploadPartRequest` builder.
+ * - Pass `contentLength` on the request instead.
+ * - Wrap the input stream in a progress-tracking input stream.
+ */
+ Consumer<Long> progressListener = new Consumer<>() {
+ private Long lastCheckpointBytes = 0L;
+
+ @Override
+ public void accept(Long totalBytesTransferred) {
+ if (totalBytesTransferred - lastCheckpointBytes >= configuration.getProgressLogByteInterval()) {
+ log.debug("Progress: {} bytes", totalBytesTransferred);
+ lastCheckpointBytes = totalBytesTransferred;
+ }
+ }
+ };
+ InputStream trackedStream = new ProgressTrackingInputStream(inputStream, progressListener);
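+ // Wrapping the stream restores the per-part progress logging that the removed v1 ProgressListener used to provide.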
+ RequestBody body = RequestBody.fromInputStream(trackedStream, partSize);
+
UploadPartRequest request =
- new UploadPartRequest()
- .withKey(key)
- .withBucketName(bucketName)
- .withUploadId(uploadId)
- .withInputStream(inputStream)
- .withPartNumber(currentPartNumber)
- .withPartSize(partSize)
- .withLastPart(isLastPart)
- .withGeneralProgressListener(progressListener);
+ UploadPartRequest.builder()
+ .bucket(configuration.getBucketName())
+ .key(key)
+ .uploadId(uploadId)
+ .partNumber(currentPartNumber)
+ .contentLength((long) partSize)
+ .build();
if (log.isDebugEnabled()) {
log.debug("Uploading part {} for id '{}'", currentPartNumber, uploadId);
}
- partETags.add(s3Client.uploadPart(request).getPartETag());
+ UploadPartResponse response = s3Client.uploadPart(request, body);
+ CompletedPart part = CompletedPart.builder()
+ .partNumber(currentPartNumber)
+ .eTag(response.eTag())
+ .build();
+ partETags.add(part);
}
/**
@@ -244,7 +262,15 @@ void complete() {
log.debug("Completing multi-part upload for key '{}', id '{}'", key, uploadId);
}
CompleteMultipartUploadRequest completeRequest =
- new CompleteMultipartUploadRequest(bucketName, key, uploadId, partETags);
+ CompleteMultipartUploadRequest.builder()
+ .bucket(configuration.getBucketName())
+ .key(key)
+ .uploadId(uploadId)
+ .multipartUpload(
+ CompletedMultipartUpload.builder()
+ .parts(partETags)
+ .build())
+ .build();
s3Client.completeMultipartUpload(completeRequest);
}
@@ -253,7 +279,12 @@ public void abort() {
log.warn("Aborting multi-part upload with id '{}'", uploadId);
}
try {
- s3Client.abortMultipartUpload(new AbortMultipartUploadRequest(bucketName, key, uploadId));
+ s3Client.abortMultipartUpload(AbortMultipartUploadRequest
+ .builder()
+ .bucket(configuration.getBucketName())
+ .key(key)
+ .uploadId(uploadId)
+ .build());
} catch (Exception e) {
// ignoring failure on abort.
if (log.isWarnEnabled()) {
diff --git a/solr-backup/src/main/java/eu/xenit/solr/backup/s3/S3StorageClient.java b/solr-backup/src/main/java/eu/xenit/solr/backup/s3/S3StorageClient.java
index 6e1fcee..d3e5a3e 100644
--- a/solr-backup/src/main/java/eu/xenit/solr/backup/s3/S3StorageClient.java
+++ b/solr-backup/src/main/java/eu/xenit/solr/backup/s3/S3StorageClient.java
@@ -16,34 +16,49 @@
*/
package eu.xenit.solr.backup.s3;
-import com.amazonaws.AmazonClientException;
-import com.amazonaws.AmazonServiceException;
-import com.amazonaws.ClientConfiguration;
-import com.amazonaws.Protocol;
-import com.amazonaws.auth.AWSStaticCredentialsProvider;
-import com.amazonaws.auth.BasicAWSCredentials;
-import com.amazonaws.client.builder.AwsClientBuilder;
-import com.amazonaws.services.s3.AmazonS3;
-import com.amazonaws.services.s3.AmazonS3ClientBuilder;
-import com.amazonaws.services.s3.model.DeleteObjectsRequest;
-import com.amazonaws.services.s3.model.DeleteObjectsRequest.KeyVersion;
-import com.amazonaws.services.s3.model.DeleteObjectsResult;
-import com.amazonaws.services.s3.model.ListObjectsRequest;
-import com.amazonaws.services.s3.model.ObjectListing;
-import com.amazonaws.services.s3.model.ObjectMetadata;
-import com.amazonaws.services.s3.model.PutObjectRequest;
-import com.amazonaws.services.s3.model.S3Object;
-import com.amazonaws.services.s3.model.S3ObjectSummary;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.Lists;
import org.apache.commons.io.input.ClosedInputStream;
import org.apache.solr.common.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
+import software.amazon.awssdk.auth.credentials.AwsBasicCredentials;
+import software.amazon.awssdk.auth.credentials.StaticCredentialsProvider;
+import software.amazon.awssdk.awscore.client.builder.AwsSyncClientBuilder;
+import software.amazon.awssdk.awscore.exception.AwsServiceException;
+import software.amazon.awssdk.core.ResponseInputStream;
+import software.amazon.awssdk.core.client.config.ClientOverrideConfiguration;
+import software.amazon.awssdk.core.exception.SdkException;
+import software.amazon.awssdk.core.sync.RequestBody;
+import software.amazon.awssdk.http.apache.ApacheHttpClient;
+import software.amazon.awssdk.http.apache.ProxyConfiguration;
+import software.amazon.awssdk.regions.Region;
+import software.amazon.awssdk.services.s3.S3Client;
+import software.amazon.awssdk.services.s3.S3ClientBuilder;
+import software.amazon.awssdk.services.s3.S3Configuration;
+import software.amazon.awssdk.services.s3.model.CommonPrefix;
+import software.amazon.awssdk.services.s3.model.Delete;
+import software.amazon.awssdk.services.s3.model.DeleteObjectRequest;
+import software.amazon.awssdk.services.s3.model.DeleteObjectsRequest;
+import software.amazon.awssdk.services.s3.model.DeleteObjectsResponse;
+import software.amazon.awssdk.services.s3.model.DeletedObject;
+import software.amazon.awssdk.services.s3.model.GetObjectRequest;
+import software.amazon.awssdk.services.s3.model.GetObjectResponse;
+import software.amazon.awssdk.services.s3.model.HeadObjectRequest;
+import software.amazon.awssdk.services.s3.model.HeadObjectResponse;
+import software.amazon.awssdk.services.s3.model.ListObjectsRequest;
+import software.amazon.awssdk.services.s3.model.ListObjectsResponse;
+import software.amazon.awssdk.services.s3.model.ListObjectsV2Request;
+import software.amazon.awssdk.services.s3.model.NoSuchKeyException;
+import software.amazon.awssdk.services.s3.model.ObjectIdentifier;
+import software.amazon.awssdk.services.s3.model.PutObjectRequest;
+import software.amazon.awssdk.services.s3.model.S3Object;
import java.io.Closeable;
import java.io.InputStream;
import java.io.OutputStream;
+import java.net.URI;
+import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
@@ -56,7 +71,7 @@
/**
- * Creates a {@link AmazonS3} for communicating with AWS S3. Utilizes the default credential
+ * Creates an {@link S3Client} for communicating with AWS S3. Utilizes the default credential
* provider chain; reference AWS SDK
* docs for details on where this client will fetch credentials from, and the order of
@@ -77,58 +92,69 @@ class S3StorageClient {
// Error messages returned by S3 for a key not found.
private static final Set<String> NOT_FOUND_CODES = Set.of("NoSuchKey", "404 Not Found");
- private final AmazonS3 s3Client;
+ private final S3Client s3Client;
+ private final S3BackupRepositoryConfig configuration;
- /**
- * The S3 bucket where we read/write all data.
- */
- private final String bucketName;
-
- S3StorageClient(
- String bucketName, String region, String proxyHost, int proxyPort, String endpoint, String accessKey, String secretKey, Boolean pathStyleAccessEnabled) {
- this(createInternalClient(region, proxyHost, proxyPort, endpoint, accessKey, secretKey, pathStyleAccessEnabled), bucketName);
+ S3StorageClient(S3BackupRepositoryConfig config) throws URISyntaxException {
+ this(createInternalClient(config), config);
}
@VisibleForTesting
- S3StorageClient(AmazonS3 s3Client, String bucketName) {
+ S3StorageClient(S3Client s3Client, S3BackupRepositoryConfig configuration) {
this.s3Client = s3Client;
- this.bucketName = bucketName;
+ this.configuration = configuration;
}
- private static AmazonS3 createInternalClient(
- String region,
- String proxyHost,
- int proxyPort,
- String endpoint,
- String accessKey,
- String secretKey, Boolean pathStyleAccessEnabled) {
- ClientConfiguration clientConfig = new ClientConfiguration().withProtocol(Protocol.HTTPS);
- // If configured, add proxy
- if (!StringUtils.isEmpty(proxyHost)) {
- clientConfig.setProxyHost(proxyHost);
- if (proxyPort > 0) {
- clientConfig.setProxyPort(proxyPort);
- }
+ private static S3Client createInternalClient(S3BackupRepositoryConfig config) throws URISyntaxException {
+
+ S3ClientBuilder clientBuilder = S3Client.builder();
+
+ S3Configuration configuration = S3Configuration.builder()
+ .checksumValidationEnabled(config.isChecksumValidationEnabled())
+ .build();
+ clientBuilder.serviceConfiguration(configuration);
+
+ /*
+ * SDK v2 Migration: Proxy settings are now configured on the HTTP client,
+ * not on a general client configuration object.
+ */
+ ApacheHttpClient.Builder httpClientBuilder = ApacheHttpClient.builder();
+ if (!StringUtils.isEmpty(config.getProxyHost())) {
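+ // Assumes the configured proxy host includes a scheme (e.g. "http://proxy-host"); without one, URI.create() misparses "host:port".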
+ ProxyConfiguration.Builder proxyConfigBuilder = ProxyConfiguration.builder()
+ .endpoint(URI.create(config.getProxyHost() + ":" + config.getProxyPort()));
+ httpClientBuilder.proxyConfiguration(proxyConfigBuilder.build());
}
+ clientBuilder.httpClientBuilder(httpClientBuilder);
/*
- * Default s3 client builder loads credentials from disk and handles token refreshes
+ * SDK v2 Migration: ClientOverrideConfiguration is still used for high-level
+ * configuration, but protocol and proxy settings have been moved.
+ * The protocol is now inferred from the endpoint URI (defaulting to HTTPS).
*/
- AmazonS3ClientBuilder clientBuilder =
- AmazonS3ClientBuilder.standard()
- .withClientConfiguration(clientConfig);
- if (!(StringUtils.isEmpty(accessKey) || StringUtils.isEmpty(secretKey))) {
- clientBuilder.withCredentials(new AWSStaticCredentialsProvider(new BasicAWSCredentials(accessKey, secretKey)));
+ clientBuilder.overrideConfiguration(ClientOverrideConfiguration.builder().build());
+
+ if (!(StringUtils.isEmpty(config.getAccessKey()) || StringUtils.isEmpty(config.getSecretKey()))) {
+ clientBuilder.credentialsProvider(
+ StaticCredentialsProvider.create(AwsBasicCredentials.create(config.getAccessKey(), config.getSecretKey())));
} else {
log.info("No accessKey or secretKey configured, using default credentials provider chain");
}
- if (!StringUtils.isEmpty(endpoint)) {
- clientBuilder.setEndpointConfiguration(
- new AwsClientBuilder.EndpointConfiguration(endpoint, region));
- } else {
- clientBuilder.setRegion(region);
+
+ /*
+ * SDK v2 Migration: `setEndpointConfiguration` from v1 is replaced by
+ * `endpointOverride`. The region must still be set separately.
+ */
+ if (!StringUtils.isEmpty(config.getEndpoint())) {
+ clientBuilder.endpointOverride(new URI(config.getEndpoint()));
}
- clientBuilder.withPathStyleAccessEnabled(pathStyleAccessEnabled);
+ clientBuilder.region(Region.of(config.getRegion()));
+
+ /*
+ * SDK v2 Migration: The method `withPathStyleAccessEnabled(boolean)` from v1 is
+ * replaced by `forcePathStyle(boolean)` in v2.
+ */
+ clientBuilder.forcePathStyle(config.isPathStyleAccessEnabled());
+
return clientBuilder.build();
}
@@ -144,18 +170,21 @@ void createDirectory(String path) throws S3Exception {
// throw new S3Exception("Parent directory doesn't exist, path=" + path);
}
- ObjectMetadata objectMetadata = new ObjectMetadata();
- objectMetadata.setContentType(S3_DIR_CONTENT_TYPE);
- objectMetadata.setContentLength(0);
- objectMetadata.setUserMetadata(Collections.singletonMap("Content-type", S3_DIR_CONTENT_TYPE));
-
- // Create empty object with header
- InputStream im = ClosedInputStream.CLOSED_INPUT_STREAM;
-
try {
- PutObjectRequest putRequest = new PutObjectRequest(bucketName, path, im, objectMetadata);
- s3Client.putObject(putRequest);
- } catch (AmazonClientException ase) {
+ /*
+ * SDK v2 Migration:
+ * - Removed the v1-style use of an empty InputStream and an ObjectMetadata object.
+ * - Replaced the v1 constructor `new PutObjectRequest(...)` with the v2 builder pattern.
+ * In v2, request parameters like bucket and key are set using builder methods.
+ */
+ PutObjectRequest putRequest = PutObjectRequest.builder()
+ .bucket(this.configuration.getBucketName())
+ .key(path)
+ .contentType(S3_DIR_CONTENT_TYPE)
+ .metadata(Collections.singletonMap("Content-Type", S3_DIR_CONTENT_TYPE))
+ .build();
+ s3Client.putObject(putRequest, RequestBody.empty());
+ } catch (SdkException ase) {
throw handleAmazonException(ase);
}
}
@@ -210,60 +239,71 @@ void deleteDirectory(String path) throws S3Exception {
String[] listDir(String path) throws S3Exception {
path = sanitizedDirPath(path);
- String prefix = "";
- if (!path.equals("/")) prefix = path;
- ListObjectsRequest listRequest =
- new ListObjectsRequest()
- .withBucketName(bucketName)
- .withPrefix(prefix)
- .withDelimiter(S3_FILE_PATH_DELIMITER);
+ String prefix = path.equals("/") ? "" : path;
+
+ // The request MUST include the delimiter.
+ ListObjectsV2Request listRequest = ListObjectsV2Request.builder()
+ .bucket(this.configuration.getBucketName())
+ .prefix(prefix)
+ .delimiter(S3_FILE_PATH_DELIMITER)
+ .build();
List<String> entries = new ArrayList<>();
try {
- ObjectListing objectListing = s3Client.listObjects(listRequest);
-
- while (true) {
- List<String> files =
- objectListing.getObjectSummaries().stream()
- .map(S3ObjectSummary::getKey)
- .collect(Collectors.toList());
- files.addAll(objectListing.getCommonPrefixes());
- // This filtering is needed only for S3mock. Real S3 does not ignore the trailing '/' in the
- // prefix.
- String finalPrefix = prefix;
- files =
- files.stream()
- .filter(s -> s.startsWith(finalPrefix))
- .map(s -> s.substring(finalPrefix.length()))
- .filter(s -> !s.isEmpty())
- .filter(
- s -> {
- int slashIndex = s.indexOf(S3_FILE_PATH_DELIMITER);
- return slashIndex == -1 || slashIndex == s.length() - 1;
- })
- .map(
- s -> {
- if (s.endsWith(S3_FILE_PATH_DELIMITER)) {
- return s.substring(0, s.length() - 1);
- }
- return s;
- })
- .collect(Collectors.toList());
-
- entries.addAll(files);
-
- if (objectListing.isTruncated()) {
- objectListing = s3Client.listNextBatchOfObjects(objectListing);
- } else {
- break;
- }
- }
+ // The v2 paginator correctly handles multiple pages of results.
+ s3Client.listObjectsV2Paginator(listRequest).forEach(page -> {
+
+ // 1. Get the list of files (S3Object) at the current level.
+ List<String> files = page.contents().stream()
+ .map(S3Object::key)
+ .collect(Collectors.toList());
+
+ // 2. Get the list of directories (CommonPrefix) at the current level.
+ List<String> directories = page.commonPrefixes().stream()
+ .map(CommonPrefix::prefix)
+ .collect(Collectors.toList());
+
+ // 3. Combine them into a single list to be processed.
+ files.addAll(directories);
+
+ // This filtering is only needed for S3mock. Real S3 does not ignore the trailing '/' in the prefix.
+ List<String> processedEntries = files.stream()
+ .filter(s -> s.startsWith(prefix))
+ .map(s -> s.substring(prefix.length()))
+ .filter(s -> !s.isEmpty())
+ .filter(s -> {
+ int slashIndex = s.indexOf(S3_FILE_PATH_DELIMITER);
+ return slashIndex == -1 || slashIndex == s.length() - 1;
+ })
+ .map(s -> {
+ if (s.endsWith(S3_FILE_PATH_DELIMITER)) {
+ return s.substring(0, s.length() - 1);
+ }
+ return s;
+ })
+ .collect(Collectors.toList());
+
+ entries.addAll(processedEntries);
+ });
+
return entries.toArray(new String[0]);
- } catch (AmazonClientException ase) {
+ } catch (SdkException ase) {
throw handleAmazonException(ase);
}
}
+ HeadObjectResponse getObjectMetadata(String path) throws SdkException {
+ /*
+ * SDK v2 Migration: Replaced the v1 `getObjectMetadata` method with a `headObject` call.
+ * This is the standard v2 way to retrieve object metadata without fetching the object's content.
+ */
+ HeadObjectRequest request = HeadObjectRequest.builder()
+ .bucket(this.configuration.getBucketName())
+ .key(path)
+ .build();
+ return s3Client.headObject(request);
+ }
+
/**
* Check if path exists.
*
@@ -282,8 +322,19 @@ boolean pathExists(String path) throws S3Exception {
}
try {
- return s3Client.doesObjectExist(bucketName, path);
- } catch (AmazonClientException ase) {
+ /*
+ * SDK v2 Migration: Replaced the v1 `doesObjectExist` convenience method.
+ * The standard v2 pattern is to make a lightweight `headObject` request.
+ * If the request succeeds, the object exists. If it throws a `NoSuchKeyException`,
+ * the object does not exist.
+ */
+ getObjectMetadata(path);
+ return true;
+ } catch (NoSuchKeyException e) {
+ // This is the expected exception when an object is not found.
+ return false;
+ } catch (SdkException ase) {
+ // Any other exception indicates a real problem (e.g., permissions).
throw handleAmazonException(ase);
}
}
@@ -298,21 +349,27 @@ boolean isDirectory(String path) throws S3Exception {
String dirPath = sanitizedDirPath(path);
try {
- ObjectMetadata objectMetadata = s3Client.getObjectMetadata(bucketName, dirPath);
- String contentType = objectMetadata.getContentType();
-
- return !StringUtils.isEmpty(contentType) && (contentType.equalsIgnoreCase(S3_DIR_CONTENT_TYPE));
- } catch (AmazonClientException ase) {
+ HeadObjectResponse dirResponse = getObjectMetadata(dirPath);
+ // SDK v2 Migration: Get the content type from the response object.
+ String contentType = dirResponse.contentType();
+ return !StringUtils.isEmpty(contentType) && contentType.equalsIgnoreCase(S3_DIR_CONTENT_TYPE);
+
+ } catch (NoSuchKeyException e) {
+ // SDK v2 Migration: Catch the specific `NoSuchKeyException` instead of the broad `AmazonClientException`.
+ // This indicates the directory marker object (e.g., "path/") was not found. Now, try the file path as a fallback.
String filePath = sanitizedFilePath(path);
try {
- ObjectMetadata objectMetadata = s3Client.getObjectMetadata(bucketName, filePath);
- String contentType = objectMetadata.getContentType();
-
- return !StringUtils.isEmpty(contentType) && (contentType.equalsIgnoreCase(S3_DIR_CONTENT_TYPE));
- } catch (AmazonClientException e) {
- log.info("Could not get back {} from S3, tried both as a folder and as a file", path,e);
+ HeadObjectResponse fileResponse = getObjectMetadata(filePath);
+ String contentType = fileResponse.contentType();
+ return !StringUtils.isEmpty(contentType) && contentType.equalsIgnoreCase(S3_DIR_CONTENT_TYPE);
+ } catch (NoSuchKeyException ex) {
+ // The key doesn't exist as a directory marker or a file.
+ log.info("Could not find key for '{}' in S3, tried both as a folder and as a file.", path);
return false;
}
+ } catch (SdkException ase) {
+ // SDK v2 Migration: Catch the base `SdkException` for all other client or service errors.
+ throw handleAmazonException(ase);
}
}
@@ -325,14 +382,14 @@ boolean isDirectory(String path) throws S3Exception {
long length(String path) throws S3Exception {
path = sanitizedFilePath(path);
try {
- ObjectMetadata objectMetadata = s3Client.getObjectMetadata(bucketName, path);
- String contentType = objectMetadata.getContentType();
+ HeadObjectResponse objectMetaData = getObjectMetadata(path);
+ String contentType = objectMetaData.contentType();
if (StringUtils.isEmpty(contentType) || !contentType.equalsIgnoreCase(S3_DIR_CONTENT_TYPE)) {
- return objectMetadata.getContentLength();
+ return objectMetaData.contentLength();
}
throw new S3Exception("Path is Directory");
- } catch (AmazonClientException ase) {
+ } catch (SdkException ase) {
throw handleAmazonException(ase);
}
}
@@ -347,10 +404,14 @@ InputStream pullStream(String path) throws S3Exception {
path = sanitizedFilePath(path);
try {
- S3Object requestedObject = s3Client.getObject(bucketName, path);
+ ResponseInputStream<GetObjectResponse> requestedObject = s3Client
+ .getObject(GetObjectRequest.builder()
+ .bucket(this.configuration.getBucketName())
+ .key(path)
+ .build());
// This InputStream instance needs to be closed by the caller
- return requestedObject.getObjectContent();
- } catch (AmazonClientException ase) {
+ return requestedObject;
+ } catch (SdkException ase) {
throw handleAmazonException(ase);
}
}
@@ -369,8 +430,8 @@ OutputStream pushStream(String path) throws S3Exception {
}
try {
- return new S3OutputStream(s3Client, path, bucketName);
- } catch (AmazonClientException ase) {
+ return new S3OutputStream(s3Client, path, this.configuration);
+ } catch (SdkException ase) {
throw handleAmazonException(ase);
}
}
@@ -379,7 +440,7 @@ OutputStream pushStream(String path) throws S3Exception {
* Override {@link Closeable} since we throw no exception.
*/
void close() {
- s3Client.shutdown();
+ s3Client.close();
}
/**
@@ -394,7 +455,7 @@ private Collection<String> deleteObjects(Collection<String> paths) throws S3Exce
* However, there's no guarantee the delete did not happen if an exception is thrown.
*/
return deleteObjects(paths, MAX_KEYS_PER_BATCH_DELETE);
- } catch (AmazonClientException ase) {
+ } catch (SdkException ase) {
throw handleAmazonException(ase);
}
}
@@ -407,26 +468,35 @@ private Collection<String> deleteObjects(Collection<String> paths) throws S3Exce
*/
@VisibleForTesting
Collection<String> deleteObjects(Collection<String> entries, int batchSize) throws S3Exception {
- List<KeyVersion> keysToDelete =
- entries.stream().map(KeyVersion::new).collect(Collectors.toList());
+ /*
+ * SDK v2 Migration: Replaced the v1 `KeyVersion` class with the v2 `ObjectIdentifier`.
+ * `ObjectIdentifier` is the standard way to specify a key for batch operations.
+ */
+ List<ObjectIdentifier> keysToDelete = entries.stream()
+ .map(key -> ObjectIdentifier.builder().key(key).build())
+ .collect(Collectors.toList());
- keysToDelete.sort(Comparator.comparing(KeyVersion::getKey).reversed());
- List<List<KeyVersion>> partitions = Lists.partition(keysToDelete, batchSize);
+ keysToDelete.sort(Comparator.comparing(ObjectIdentifier::key).reversed());
+ List<List<ObjectIdentifier>> partitions = Lists.partition(keysToDelete, batchSize);
Set<String> deletedPaths = new HashSet<>();
boolean deleteIndividually = false;
- for (List<KeyVersion> partition : partitions) {
+ for (List<ObjectIdentifier> partition : partitions) {
DeleteObjectsRequest request = createBatchDeleteRequest(partition);
try {
- DeleteObjectsResult result = s3Client.deleteObjects(request);
-
- result.getDeletedObjects().stream()
- .map(DeleteObjectsResult.DeletedObject::getKey)
+ DeleteObjectsResponse result = s3Client.deleteObjects(request);
+
+ /*
+ * SDK v2 Migration: The response object's method to get deleted items is `deleted()`,
+ * not `deletedObjects()`. The items in the list are of type `DeletedObject`.
+ */
+ result.deleted().stream()
+ .map(DeletedObject::key)
.forEach(deletedPaths::add);
- } catch (AmazonServiceException e) {
+ } catch (AwsServiceException e) {
// This means that the batch-delete is not implemented by this S3 server
- if (e.getStatusCode() == 501) {
+ if (e.awsErrorDetails().sdkHttpResponse().statusCode() == 501) {
deleteIndividually = true;
break;
} else {
@@ -436,12 +506,16 @@ Collection deleteObjects(Collection entries, int batchSize) thro
}
if (deleteIndividually) {
- for (KeyVersion k : keysToDelete) {
+ for (ObjectIdentifier k : keysToDelete) {
try {
- s3Client.deleteObject(bucketName, k.getKey());
- deletedPaths.add(k.getKey());
- } catch (AmazonClientException e) {
- throw new S3Exception("Could not delete object with key: " + k.getKey(), e);
+ s3Client.deleteObject(DeleteObjectRequest
+ .builder()
+ .bucket(this.configuration.getBucketName())
+ .key(k.key())
+ .build());
+ deletedPaths.add(k.key());
+ } catch (SdkException e) {
+ throw new S3Exception("Could not delete object with key: " + k.key(), e);
}
}
}
@@ -449,38 +523,56 @@ Collection deleteObjects(Collection entries, int batchSize) thro
return deletedPaths;
}
- private DeleteObjectsRequest createBatchDeleteRequest(List<KeyVersion> keysToDelete) {
- return new DeleteObjectsRequest(bucketName).withKeys(keysToDelete);
+ private DeleteObjectsRequest createBatchDeleteRequest(List<ObjectIdentifier> keysToDelete) {
+ /*
+ * SDK v2 Migration: The request requires a `Delete` object that wraps the list
+ * of `ObjectIdentifier`s, instead of passing the list directly to the request builder.
+ */
+ Delete deleteAction = Delete.builder()
+ .objects(keysToDelete)
+ .build();
+
+ return DeleteObjectsRequest.builder()
+ .bucket(this.configuration.getBucketName())
+ .delete(deleteAction)
+ .build();
}
private List<String> listAll(String path) throws S3Exception {
String prefix = sanitizedDirPath(path);
- ListObjectsRequest listRequest =
- new ListObjectsRequest().withBucketName(bucketName).withPrefix(prefix);
+
+ /*
+ * SDK v2 Migration: Switched from the generic `ListObjectsRequest` to the recommended
+ * `ListObjectsV2Request`.
+ */
+ ListObjectsV2Request listRequest = ListObjectsV2Request.builder()
+ .bucket(this.configuration.getBucketName())
+ .prefix(prefix)
+ .build();
List<String> entries = new ArrayList<>();
try {
- ObjectListing objectListing = s3Client.listObjects(listRequest);
-
- while (true) {
- List<String> files =
- objectListing.getObjectSummaries().stream()
- .map(S3ObjectSummary::getKey)
- // This filtering is needed only for S3mock. Real S3 does not ignore the trailing
- // '/' in the prefix.
- .filter(s -> s.startsWith(prefix))
- .collect(Collectors.toList());
-
- entries.addAll(files);
-
- if (objectListing.isTruncated()) {
- objectListing = s3Client.listNextBatchOfObjects(objectListing);
- } else {
- break;
- }
- }
+ /*
+ * SDK v2 Migration: Replaced the manual `while` loop and the v1-only
+ * `listNextBatchOfObjects` call (removed in v2) with the idiomatic v2 paginator.
+ * The `listObjectsV2Paginator` automatically handles the logic of fetching
+ * subsequent pages of results until all objects are listed.
+ */
+ s3Client.listObjectsV2Paginator(listRequest).forEach(page -> {
+ /*
+ * SDK v2 Migration: The object list in the response is accessed via
+ * the `contents()` method, which replaces the v1 `objectSummaries()`.
+ */
+ List<String> files = page.contents().stream()
+ .map(S3Object::key)
+ // This application-specific filtering logic is preserved.
+ .filter(s -> s.startsWith(prefix))
+ .collect(Collectors.toList());
+
+ entries.addAll(files);
+ });
return entries;
- } catch (AmazonClientException ase) {
+ } catch (SdkException ase) {
throw handleAmazonException(ase);
}
}
@@ -572,31 +664,41 @@ String sanitizedDirPath(String path) throws S3Exception {
* Best effort to handle Amazon exceptions as checked exceptions. Amazon exceptions are all
* subclasses of {@link RuntimeException}, so some may still be uncaught and propagated.
*/
- static S3Exception handleAmazonException(AmazonClientException ace) {
+ static S3Exception handleAmazonException(SdkException ace) {
- if (ace instanceof AmazonServiceException) {
- AmazonServiceException ase = (AmazonServiceException) ace;
+ // Check if the exception is a service-side error from AWS.
+ if (ace instanceof AwsServiceException) {
+ AwsServiceException ase = (AwsServiceException) ace;
+
+ /*
+ * SDK v2 Migration:
+ * - Replaced `ase.awsErrorDetails().sdkHttpResponse().statusCode()` with the simpler `ase.statusCode()`.
+ * - The `getErrorType()` method from v1 (which returned an enum like Client/Service) does not exist in v2.
+ * The fact that we are in this block means it's a service-side error, so we can hardcode "Service"
+ * to maintain the log's structure.
+ */
String errMessage =
String.format(
Locale.ROOT,
"An AmazonServiceException was thrown! [serviceName=%s] "
+ "[awsRequestId=%s] [httpStatus=%s] [s3ErrorCode=%s] [s3ErrorType=%s] [message=%s]",
- ase.getServiceName(),
- ase.getRequestId(),
- ase.getStatusCode(),
- ase.getErrorCode(),
- ase.getErrorType(),
- ase.getErrorMessage());
+ ase.awsErrorDetails().serviceName(),
+ ase.requestId(),
+ ase.statusCode(), // Simplified accessor for status code.
+ ase.awsErrorDetails().errorCode(),
+ "Service", // Replaced getErrorType()
+ ase.awsErrorDetails().errorMessage());
log.error(errMessage);
- if (ase.getStatusCode() == 404 && NOT_FOUND_CODES.contains(ase.getErrorCode())) {
+ if (ase.statusCode() == 404 && NOT_FOUND_CODES.contains(ase.awsErrorDetails().errorCode())) {
return new S3NotFoundException(errMessage, ase);
} else {
return new S3Exception(errMessage, ase);
}
}
+ // Handles client-side exceptions (e.g., network issues) or other SDK errors.
return new S3Exception(ace);
}
}