11#include " nix/store/s3-binary-cache-store.hh"
22#include " nix/store/http-binary-cache-store.hh"
33#include " nix/store/store-registration.hh"
4+ #include " nix/util/error.hh"
5+ #include " nix/util/logging.hh"
6+ #include " nix/util/compression.hh"
7+ #include " nix/util/serialise.hh"
8+ #include " nix/util/util.hh"
49
510#include < cassert>
611#include < ranges>
712#include < regex>
813
namespace nix {

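+ // Per the AWS S3 API, 5 GiB is the upper bound both for a single PutObject request
+ // and for an individual part of a multipart upload.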
+ static constexpr uint64_t AWS_MAX_PART_SIZE = 5ULL * 1024 * 1024 * 1024; // 5GiB
+
class S3BinaryCacheStore : public virtual HttpBinaryCacheStore
{
public:
@@ -28,6 +35,21 @@ class S3BinaryCacheStore : public virtual HttpBinaryCacheStore
private:
    ref<S3BinaryCacheStoreConfig> s3Config;

+     /**
+      * Uploads a file to S3 using a regular (non-multipart) upload.
+      *
+      * This method is suitable for files up to 5GiB in size. For larger files,
+      * multipart upload should be used instead.
+      *
+      * @see https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html
+      */
+     void upload(
+         std::string_view path,
+         std::shared_ptr<std::basic_iostream<char>> istream,
+         const uint64_t sizeHint,
+         std::string_view mimeType,
+         std::optional<std::string_view> contentEncoding);
+
    /**
     * Creates a multipart upload for large objects to S3.
     *
@@ -61,7 +83,45 @@ void S3BinaryCacheStore::upsertFile(
    const std::string & mimeType,
    uint64_t sizeHint)
{
-     HttpBinaryCacheStore::upsertFile(path, istream, mimeType, sizeHint);
+     auto compressionMethod = getCompressionMethod(path);
+     std::optional<std::string> contentEncoding = std::nullopt;
+
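+     // Compress the payload up front so that sizeHint and the Content-Encoding header
+     // describe the (compressed) bytes that are actually sent.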
+     if (compressionMethod) {
+         auto compressedData = compress(*compressionMethod, StreamToSourceAdapter(istream).drain());
+         sizeHint = compressedData.size();
+         istream = std::make_shared<std::stringstream>(std::move(compressedData));
+         contentEncoding = compressionMethod;
+     }
+
+     upload(path, istream, sizeHint, mimeType, contentEncoding);
+ }
+
+ void S3BinaryCacheStore::upload(
+     std::string_view path,
+     std::shared_ptr<std::basic_iostream<char>> istream,
+     const uint64_t sizeHint,
+     std::string_view mimeType,
+     std::optional<std::string_view> contentEncoding)
+ {
+     debug("using S3 regular upload for '%s' (%d bytes)", path, sizeHint);
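+     // Reject anything above the single-request PutObject limit up front rather than
+     // attempting a transfer that S3 would refuse.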
+     if (sizeHint > AWS_MAX_PART_SIZE)
+         throw Error(
+             "file too large for S3 upload without multipart: %s would exceed the maximum size of %s. Consider enabling multipart-upload.",
+             renderSize(sizeHint),
+             renderSize(AWS_MAX_PART_SIZE));
+
+     auto req = makeRequest(path);
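+     // Drain the whole stream into memory: a regular PutObject sends the body as a
+     // single buffer.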
+     auto data = StreamToSourceAdapter(istream).drain();
+     if (contentEncoding) {
+         req.headers.emplace_back("Content-Encoding", *contentEncoding);
+     }
+     req.data = std::move(data);
+     req.mimeType = mimeType;
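+     // Hand the request to the shared FileTransfer layer and rewrap transport errors
+     // with the cache URI for context.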
+     try {
+         getFileTransfer()->upload(req);
+     } catch (FileTransferError & e) {
+         throw Error("while uploading to S3 binary cache at '%s': %s", config->cacheUri.to_string(), e.msg());
+     }
}

std::string S3BinaryCacheStore::createMultipartUpload(