Commit d8c8e79

refactor(libstore/s3-binary-cache-store): implement upload()
Stop delegating to `HttpBinaryCacheStore::upsertFile` and instead handle compression in the S3 store's `upsertFile` override, then call our own `upload()` method. This separation is necessary for future multipart upload support.
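For context, the `CompressedSource` helper used by the new `upsertFile` is defined elsewhere in the tree; judging from its call sites in the diff below, its interface looks roughly like this (a sketch inferred from usage, not the actual declaration — the exact return types are guesses):

    // Sketch inferred from the call sites in this diff, not the real class.
    // CompressedSource wraps a RestartableSource and compresses it with the
    // given method, so the compressed size is known before the upload starts
    // and the data can be replayed if the transfer is retried.
    struct CompressedSource : RestartableSource
    {
        CompressedSource(RestartableSource & inner, std::string method);
        uint64_t size() const;                   // compressed size, passed as sizeHint
        std::string_view getCompressionMethod(); // forwarded as the Content-Encoding
    };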
Parent: 93fe335


src/libstore/s3-binary-cache-store.cc

Lines changed: 63 additions & 1 deletion
@@ -1,6 +1,11 @@
 #include "nix/store/s3-binary-cache-store.hh"
 #include "nix/store/http-binary-cache-store.hh"
 #include "nix/store/store-registration.hh"
+#include "nix/util/error.hh"
+#include "nix/util/logging.hh"
+#include "nix/util/compression.hh"
+#include "nix/util/serialise.hh"
+#include "nix/util/util.hh"
 
 #include <cassert>
 #include <ranges>
@@ -9,6 +14,10 @@
 
 namespace nix {
 
+MakeError(UploadToS3, Error);
+
+static constexpr uint64_t AWS_MAX_PART_SIZE = 5ULL * 1024 * 1024 * 1024; // 5GiB
+
 class S3BinaryCacheStore : public virtual HttpBinaryCacheStore
 {
 public:
@@ -26,6 +35,26 @@ class S3BinaryCacheStore : public virtual HttpBinaryCacheStore
 private:
     ref<S3BinaryCacheStoreConfig> s3Config;
 
+    /**
+     * Uploads a file to S3 using a regular (non-multipart) upload.
+     *
+     * This method is suitable for files up to 5GiB in size. For larger files,
+     * multipart upload should be used instead.
+     *
+     * @see https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+     */
+    void upload(
+        std::string_view path,
+        RestartableSource & source,
+        uint64_t sizeHint,
+        std::string_view mimeType,
+        std::optional<std::string_view> contentEncoding);
+
+    /**
+     * Uploads a file to S3 (CompressedSource overload).
+     */
+    void upload(std::string_view path, CompressedSource & source, std::string_view mimeType);
+
     /**
      * Creates a multipart upload for large objects to S3.
      *
@@ -69,7 +98,40 @@ class S3BinaryCacheStore : public virtual HttpBinaryCacheStore
 void S3BinaryCacheStore::upsertFile(
     const std::string & path, RestartableSource & source, const std::string & mimeType, uint64_t sizeHint)
 {
-    HttpBinaryCacheStore::upsertFile(path, source, mimeType, sizeHint);
+    if (auto compressionMethod = getCompressionMethod(path)) {
+        CompressedSource compressed(source, *compressionMethod);
+        upload(path, compressed, mimeType);
+    } else {
+        upload(path, source, sizeHint, mimeType, std::nullopt);
+    }
+}
+
+void S3BinaryCacheStore::upload(
+    std::string_view path,
+    RestartableSource & source,
+    uint64_t sizeHint,
+    std::string_view mimeType,
+    std::optional<std::string_view> contentEncoding)
+{
+    debug("using S3 regular upload for '%s' (%d bytes)", path, sizeHint);
+    if (sizeHint > AWS_MAX_PART_SIZE)
+        throw Error(
+            "file too large for S3 upload without multipart: %s would exceed maximum size of %s. Consider enabling multipart-upload.",
+            renderSize(sizeHint),
+            renderSize(AWS_MAX_PART_SIZE));
+
+    try {
+        HttpBinaryCacheStore::upload(path, source, sizeHint, mimeType, contentEncoding);
+    } catch (FileTransferError & e) {
+        UploadToS3 err(e.message());
+        err.addTrace({}, "while uploading to S3 binary cache at '%s'", config->cacheUri.to_string());
+        throw err;
+    }
+}
+
+void S3BinaryCacheStore::upload(std::string_view path, CompressedSource & source, std::string_view mimeType)
+{
+    upload(path, static_cast<RestartableSource &>(source), source.size(), mimeType, source.getCompressionMethod());
 }
 
 std::string S3BinaryCacheStore::createMultipartUpload(
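Two details of the new error handling are worth noting. `MakeError(UploadToS3, Error)` comes from `nix/util/error.hh` and expands to roughly the following (a sketch of the macro's effect, not its exact text):

    // Approximate expansion of MakeError(UploadToS3, Error): a distinct
    // exception type that inherits Error's formatting constructors.
    class UploadToS3 : public Error
    {
    public:
        using Error::Error;
    };

Because `UploadToS3` is its own type, callers can catch S3 upload failures separately from other `FileTransferError`s, and the `addTrace` call attaches the cache URI so failures identify which binary cache was involved. The 5GiB guard matches the documented limit for a single S3 PutObject request; larger objects need multipart upload, which the commit message flags as future work.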
