 #include "nix/store/s3-binary-cache-store.hh"
 #include "nix/store/http-binary-cache-store.hh"
 #include "nix/store/store-registration.hh"
+#include "nix/util/error.hh"
+#include "nix/util/logging.hh"
+#include "nix/util/compression.hh"
+#include "nix/util/serialise.hh"
+#include "nix/util/util.hh"
 
 #include <cassert>
 #include <ranges>
 #include <regex>
 
 namespace nix {
 
+MakeError(UploadToS3, Error);
+
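+// Objects larger than this cannot be sent with a single PutObject request and
+// require multipart upload instead.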
+static constexpr uint64_t AWS_MAX_PART_SIZE = 5ULL * 1024 * 1024 * 1024; // 5GiB
+
 class S3BinaryCacheStore : public virtual HttpBinaryCacheStore
 {
 public:
@@ -28,6 +37,21 @@ class S3BinaryCacheStore : public virtual HttpBinaryCacheStore
 private:
     ref<S3BinaryCacheStoreConfig> s3Config;
 
+    /**
+     * Uploads a file to S3 using a regular (non-multipart) upload.
+     *
+     * This method is suitable for files up to 5GiB in size. For larger files,
+     * multipart upload should be used instead.
+     *
+     * @see https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+     */
+    void upload(
+        std::string_view path,
+        std::shared_ptr<std::basic_iostream<char>> istream,
+        const uint64_t sizeHint,
+        std::string_view mimeType,
+        std::optional<std::string_view> contentEncoding);
+
     /**
      * Creates a multipart upload for large objects to S3.
      *
@@ -61,7 +85,42 @@ void S3BinaryCacheStore::upsertFile(
     const std::string & mimeType,
     uint64_t sizeHint)
 {
-    HttpBinaryCacheStore::upsertFile(path, istream, mimeType, sizeHint);
+    auto compressionMethod = getCompressionMethod(path);
+    std::optional<std::string> contentEncoding = std::nullopt;
+
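+    // Compress the payload up front so that the size hint, the stream, and the
+    // Content-Encoding header all describe the compressed data.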
+    if (compressionMethod) {
+        auto compressedData = compress(*compressionMethod, StreamToSourceAdapter(istream).drain());
+        sizeHint = compressedData.size();
+        istream = std::make_shared<std::stringstream>(std::move(compressedData));
+        contentEncoding = compressionMethod;
+    }
+
+    upload(path, istream, sizeHint, mimeType, contentEncoding);
+}
+
+void S3BinaryCacheStore::upload(
+    std::string_view path,
+    std::shared_ptr<std::basic_iostream<char>> istream,
+    const uint64_t sizeHint,
+    std::string_view mimeType,
+    std::optional<std::string_view> contentEncoding)
+{
+    debug("using S3 regular upload for '%s' (%d bytes)", path, sizeHint);
+    if (sizeHint > AWS_MAX_PART_SIZE)
+        throw Error(
+            "file too large for S3 upload without multipart: %s would exceed maximum size of %s. Consider enabling multipart-upload.",
+            renderSize(sizeHint),
+            renderSize(AWS_MAX_PART_SIZE));
+
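+    // A regular upload sends the whole object in one request, so drain the
+    // stream into memory here rather than streaming it.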
+    auto data = StreamToSourceAdapter(istream).drain();
+
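+    // Rewrap transfer failures so the error trace names the S3 binary cache being written to.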
+    try {
+        uploadData(path, std::move(data), mimeType, contentEncoding);
+    } catch (FileTransferError & e) {
+        UploadToS3 err(e.message());
+        err.addTrace({}, "while uploading to S3 binary cache at '%s'", config->cacheUri.to_string());
+        throw err;
+    }
 }
 
 std::string S3BinaryCacheStore::createMultipartUpload(