Skip to content

Commit 151d0e4

Browse files
committed
refactor(libstore/s3-binary-cache-store): implement upload()
This stops us from simply delegating to `HttpBinaryCacheStore::upsertFile`; instead we handle compression in our own `upsertFile` override, which will be necessary for multipart uploads.
1 parent 60f9489 commit 151d0e4

File tree

1 file changed

+53
-1
lines changed

1 file changed

+53
-1
lines changed

src/libstore/s3-binary-cache-store.cc

Lines changed: 53 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,19 @@
11
#include "nix/store/s3-binary-cache-store.hh"
22
#include "nix/store/http-binary-cache-store.hh"
33
#include "nix/store/store-registration.hh"
4+
#include "nix/util/error.hh"
5+
#include "nix/util/logging.hh"
6+
#include "nix/util/compression.hh"
7+
#include "nix/util/serialise.hh"
8+
#include "nix/util/util.hh"
49

510
#include <cassert>
611
#include <ranges>
712

813
namespace nix {
914

15+
// Upper bound enforced on a single (non-multipart) S3 upload; AWS rejects
// larger single PUTs, so bigger files require multipart upload instead.
static constexpr uint64_t AWS_MAX_PART_SIZE = 5ULL * 1024 * 1024 * 1024; // 5GiB
16+
1017
class S3BinaryCacheStore : public virtual HttpBinaryCacheStore
1118
{
1219
public:
@@ -26,6 +33,13 @@ class S3BinaryCacheStore : public virtual HttpBinaryCacheStore
2633

2734
private:
2835
ref<S3BinaryCacheStoreConfig> s3Config;
36+
37+
void upload(
38+
std::string_view path,
39+
std::shared_ptr<std::basic_iostream<char>> istream,
40+
const uint64_t sizeHint,
41+
std::string_view mimeType,
42+
std::optional<std::string_view> contentEncoding);
2943
};
3044

3145
void S3BinaryCacheStore::upsertFile(
@@ -34,7 +48,45 @@ void S3BinaryCacheStore::upsertFile(
3448
const std::string & mimeType,
3549
uint64_t sizeHint)
3650
{
37-
HttpBinaryCacheStore::upsertFile(path, istream, mimeType, sizeHint);
51+
auto compressionMethod = getCompressionMethod(path);
52+
std::optional<std::string> contentEncoding = std::nullopt;
53+
54+
if (compressionMethod) {
55+
auto compressedData = compress(*compressionMethod, StreamToSourceAdapter(istream).drain());
56+
sizeHint = compressedData.size();
57+
istream = std::make_shared<std::stringstream>(std::move(compressedData));
58+
contentEncoding = compressionMethod;
59+
}
60+
61+
upload(path, istream, sizeHint, mimeType, contentEncoding);
62+
}
63+
64+
void S3BinaryCacheStore::upload(
65+
std::string_view path,
66+
std::shared_ptr<std::basic_iostream<char>> istream,
67+
const uint64_t sizeHint,
68+
std::string_view mimeType,
69+
std::optional<std::string_view> contentEncoding)
70+
{
71+
debug("using S3 regular upload for '%s' (%d bytes)", path, sizeHint);
72+
if (sizeHint > AWS_MAX_PART_SIZE)
73+
throw Error(
74+
"file too large for S3 upload without multipart: %s would exceed maximum size of %s. Consider enabling multipart-upload.",
75+
renderSize(sizeHint),
76+
renderSize(AWS_MAX_PART_SIZE));
77+
78+
auto req = makeRequest(path);
79+
auto data = StreamToSourceAdapter(istream).drain();
80+
if (contentEncoding) {
81+
req.headers.emplace_back("Content-Encoding", *contentEncoding);
82+
}
83+
req.data = std::move(data);
84+
req.mimeType = mimeType;
85+
try {
86+
getFileTransfer()->upload(req);
87+
} catch (FileTransferError & e) {
88+
throw Error("while uploading to S3 binary cache at '%s': %s", config->cacheUri.to_string(), e.msg());
89+
}
3890
}
3991

4092
StringSet S3BinaryCacheStoreConfig::uriSchemes()

0 commit comments

Comments
 (0)