diff --git a/src/Squirrel/ReleaseEntry.cs b/src/Squirrel/ReleaseEntry.cs
index 258f95be4..b09fa399b 100644
--- a/src/Squirrel/ReleaseEntry.cs
+++ b/src/Squirrel/ReleaseEntry.cs
@@ -356,6 +356,17 @@ static string stagingPercentageAsString(float percentage)
return String.Format("{0:F0}%", percentage * 100.0);
}
+ /// <inheritdoc />
+ public override string ToString()
+ {
+ return Filename;
+ }
+
+ /// <inheritdoc />
+ public override int GetHashCode()
+ {
+ return Filename.GetHashCode();
+ }
/// <summary>
/// Given a list of releases and a specified release package, returns the release package
diff --git a/src/SquirrelCli/Options.cs b/src/SquirrelCli/Options.cs
index a89d95027..89d288333 100644
--- a/src/SquirrelCli/Options.cs
+++ b/src/SquirrelCli/Options.cs
@@ -203,37 +203,41 @@ public SyncBackblazeOptions()
public override void Validate()
{
IsRequired(nameof(b2KeyId), nameof(b2AppKey), nameof(b2BucketId));
+ Log.Warn("Provider 'b2' is being deprecated and will no longer be updated.");
+ Log.Warn("The replacement is to use the 's3' provider with Backblaze B2 via the '--endpoint' option.");
}
}
internal class SyncS3Options : BaseOptions
{
- public string key { get; private set; }
+ public string keyId { get; private set; }
public string secret { get; private set; }
public string region { get; private set; }
- public string endpointUrl { get; private set; }
+ public string endpoint { get; private set; }
public string bucket { get; private set; }
public string pathPrefix { get; private set; }
public bool overwrite { get; private set; }
+ public int keepMaxReleases { get; private set; }
- public SyncS3Options()
+ public SyncS3Options()
{
- Add("key=", "Authentication {IDENTIFIER} or access key", v => key = v);
+ Add("keyId=", "Authentication {IDENTIFIER} or access key", v => keyId = v);
Add("secret=", "Authentication secret {KEY}", v => secret = v);
Add("region=", "AWS service {REGION} (eg. us-west-1)", v => region = v);
- Add("endpointUrl=", "Custom service {URL} (from backblaze, digital ocean, etc)", v => endpointUrl = v);
- Add("bucket=", "{NAME} of the S3 bucket to access", v => bucket = v);
- Add("pathPrefix=", "A sub-folder {PATH} to read and write files in", v => pathPrefix = v);
- Add("overwrite", "Replace any mismatched remote files with files in local directory", v => overwrite = true);
+ Add("endpoint=", "Custom service {URL} (backblaze, digital ocean, etc)", v => endpoint = v);
+ Add("bucket=", "{NAME} of the S3 bucket", v => bucket = v);
+ Add("pathPrefix=", "A sub-folder {PATH} used for files in the bucket, for creating release channels (eg. 'stable' or 'dev')", v => pathPrefix = v);
+ Add("overwrite", "Replace existing files if source has changed", v => overwrite = true);
+ Add("keepMaxReleases=", "Applies a retention policy during upload which keeps only the specified {NUMBER} of old versions",
+ v => keepMaxReleases = ParseIntArg(nameof(keepMaxReleases), v));
}
public override void Validate()
{
- IsRequired(nameof(secret), nameof(key), nameof(bucket));
- IsValidUrl(nameof(endpointUrl));
+ IsRequired(nameof(secret), nameof(keyId), nameof(bucket));
- if ((region == null) == (endpointUrl == null)) {
- throw new OptionValidationException("One of 'region' and 'endpoint' arguments is required and are also mutually exclusive. Specify one of these. ");
+ if ((region == null) == (endpoint == null)) {
+ throw new OptionValidationException("Exactly one of the 'region' or 'endpoint' arguments must be specified; they are mutually exclusive.");
}
if (region != null) {
diff --git a/src/SquirrelCli/SquirrelCli.csproj b/src/SquirrelCli/SquirrelCli.csproj
index 2c61b9625..f2cfa764f 100644
--- a/src/SquirrelCli/SquirrelCli.csproj
+++ b/src/SquirrelCli/SquirrelCli.csproj
@@ -25,6 +25,7 @@
+ <PackageReference Include="System.Linq.Async" Version="5.1.0" />
diff --git a/src/SquirrelCli/Sync/S3Repository.cs b/src/SquirrelCli/Sync/S3Repository.cs
index a4205179a..ac7f85ac1 100644
--- a/src/SquirrelCli/Sync/S3Repository.cs
+++ b/src/SquirrelCli/Sync/S3Repository.cs
@@ -27,10 +27,10 @@ public S3Repository(SyncS3Options options)
_options = options;
if (options.region != null) {
var r = RegionEndpoint.GetBySystemName(options.region);
- _client = new AmazonS3Client(_options.key, _options.secret, r);
- } else if (options.endpointUrl != null) {
- var config = new AmazonS3Config() { ServiceURL = _options.endpointUrl };
- _client = new AmazonS3Client(_options.key, _options.secret, config);
+ _client = new AmazonS3Client(_options.keyId, _options.secret, r);
+ } else if (options.endpoint != null) {
+ var config = new AmazonS3Config() { ServiceURL = _options.endpoint };
+ _client = new AmazonS3Client(_options.keyId, _options.secret, config);
} else {
throw new InvalidOperationException("Missing endpoint");
}
@@ -69,52 +69,186 @@ public async Task DownloadRecentPackages()
public async Task UploadMissingPackages()
{
+ Log.Info($"Uploading releases from '{_options.releaseDir}' to S3 bucket '{_options.bucket}'"
+ + (String.IsNullOrWhiteSpace(_prefix) ? "" : " with prefix '" + _prefix + "'"));
+
var releasesDir = new DirectoryInfo(_options.releaseDir);
- var files = releasesDir.GetFiles();
- var setupFile = files.Where(f => f.FullName.EndsWith("Setup.exe")).SingleOrDefault();
- var releasesFile = files.Where(f => f.Name == "RELEASES").SingleOrDefault();
- var filesWithoutSpecial = files.Except(new[] { setupFile, releasesFile });
+ // locate files to upload
+ var files = releasesDir.GetFiles("*", SearchOption.TopDirectoryOnly);
+ var msiFile = files.Where(f => f.FullName.EndsWith(".msi", StringComparison.InvariantCultureIgnoreCase)).SingleOrDefault();
+ var setupFile = files.Where(f => f.FullName.EndsWith("Setup.exe", StringComparison.InvariantCultureIgnoreCase))
+ .ContextualSingle("release directory", "Setup.exe file");
+ var releasesFile = files.Where(f => f.Name.Equals("RELEASES", StringComparison.InvariantCultureIgnoreCase))
+ .ContextualSingle("release directory", "RELEASES file");
+ var nupkgFiles = files.Where(f => f.FullName.EndsWith(".nupkg", StringComparison.InvariantCultureIgnoreCase)).ToArray();
- foreach (var f in filesWithoutSpecial) {
- string key = _prefix + f.Name;
- string deleteOldVersionId = null;
+ // apply retention policy. count '-full' versions only, then also remove corresponding delta packages
+ var releaseEntries = ReleaseEntry.ParseReleaseFile(File.ReadAllText(releasesFile.FullName))
+ .OrderBy(k => k.Version)
+ .ThenBy(k => !k.IsDelta)
+ .ToArray();
- try {
- var metadata = await _client.GetObjectMetadataAsync(_options.bucket, key);
- var md5 = GetFileMD5Checksum(f.FullName);
- var stored = metadata?.ETag?.Trim().Trim('"');
-
- if (stored != null) {
- if (stored.Equals(md5, StringComparison.InvariantCultureIgnoreCase)) {
- Log.Info($"Skipping '{f.FullName}', matching file exists in remote.");
- continue;
- } else if (_options.overwrite) {
- Log.Info($"File '{f.FullName}' exists in remote, replacing...");
- deleteOldVersionId = metadata.VersionId;
- } else {
- Log.Warn($"File '{f.FullName}' exists in remote and checksum does not match. Use 'overwrite' argument to replace remote file.");
- continue;
- }
+ var fullCount = releaseEntries.Where(r => !r.IsDelta).Count();
+ if (_options.keepMaxReleases > 0 && fullCount > _options.keepMaxReleases) {
+ Log.Info($"Retention Policy: {fullCount - _options.keepMaxReleases} releases will be removed from RELEASES file.");
+
+ var fullReleases = releaseEntries
+ .OrderByDescending(k => k.Version)
+ .Where(k => !k.IsDelta)
+ .Take(_options.keepMaxReleases)
+ .ToArray();
+
+ var deltaReleases = releaseEntries
+ .OrderByDescending(k => k.Version)
+ .Where(k => k.IsDelta)
+ .Where(k => fullReleases.Any(f => f.Version == k.Version))
+ .Where(k => k.Version != fullReleases.Last().Version) // ignore delta packages for the oldest full package
+ .ToArray();
+
+ Log.Info($"Total number of packages in remote after retention: {fullReleases.Length} full, {deltaReleases.Length} delta.");
+ fullCount = fullReleases.Length;
+
+ releaseEntries = fullReleases
+ .Concat(deltaReleases)
+ .OrderBy(k => k.Version)
+ .ThenBy(k => !k.IsDelta)
+ .ToArray();
+ ReleaseEntry.WriteReleaseFile(releaseEntries, releasesFile.FullName);
+ } else {
+ Log.Info($"There are currently {fullCount} full releases in RELEASES file.");
+ }
+
+ // we need to upload things in a certain order. If we upload 'RELEASES' first, for example, a client
+ // might try to request a nupkg that does not yet exist.
+
+ // upload nupkg's first
+ foreach (var f in nupkgFiles) {
+ if (!releaseEntries.Any(r => r.Filename.Equals(f.Name, StringComparison.InvariantCultureIgnoreCase))) {
+ Log.Warn($"Upload file '{f.Name}' skipped (not in RELEASES file)");
+ continue;
+ }
+ await UploadFile(f, _options.overwrite);
+ }
+
+ // next upload setup files
+ await UploadFile(setupFile, true);
+ if (msiFile != null) await UploadFile(msiFile, true);
+
+ // upload RELEASES
+ await UploadFile(releasesFile, true);
+
+ // ignore dead package cleanup if there is no retention policy
+ if (_options.keepMaxReleases > 0) {
+
+ // remove any dead packages (not in RELEASES) as they are undiscoverable anyway
+ Log.Info("Searching for remote dead packages (not in RELEASES file)");
+
+ var objects = await ListBucketContentsAsync(_client, _options.bucket).ToArrayAsync();
+ var deadObjectKeys = objects
+ .Select(o => o.Key)
+ .Where(o => o.EndsWith(".nupkg", StringComparison.InvariantCultureIgnoreCase))
+ .Where(o => o.StartsWith(_prefix, StringComparison.InvariantCultureIgnoreCase))
+ .Select(o => o.Substring(_prefix.Length))
+ .Where(o => !o.Contains('/')) // filters out objects in folders if _prefix is empty
+ .Where(o => !releaseEntries.Any(r => r.Filename.Equals(o, StringComparison.InvariantCultureIgnoreCase)))
+ .ToArray();
+
+ Log.Info($"Found {deadObjectKeys.Length} dead packages.");
+ foreach (var objKey in deadObjectKeys) {
+ await RetryAsync(() => _client.DeleteObjectAsync(new DeleteObjectRequest { BucketName = _options.bucket, Key = objKey }),
+ "Deleting dead package: " + objKey);
+ }
+ }
+
+ Log.Info("Done");
+
+ var endpoint = new Uri(_options.endpoint ?? ("https://" + RegionEndpoint.GetBySystemName(_options.region).GetEndpointForService("s3").Hostname));
+ var baseurl = $"https://{_options.bucket}.{endpoint.Host}/{_prefix}";
+ Log.Info($"Bucket URL: {baseurl}");
+ Log.Info($"Setup URL: {baseurl}{setupFile.Name}");
+ }
+
+ private static async IAsyncEnumerable<S3Object> ListBucketContentsAsync(IAmazonS3 client, string bucketName)
+ {
+ var request = new ListObjectsV2Request {
+ BucketName = bucketName,
+ MaxKeys = 100,
+ };
+
+ ListObjectsV2Response response;
+ do {
+ response = await client.ListObjectsV2Async(request);
+ foreach (var obj in response.S3Objects) {
+ yield return obj;
+ }
+
+ // If the response is truncated, set the request ContinuationToken
+ // from the NextContinuationToken property of the response.
+ request.ContinuationToken = response.NextContinuationToken;
+ }
+ while (response.IsTruncated);
+ }
+
+ private async Task UploadFile(FileInfo f, bool overwriteRemote)
+ {
+ string key = _prefix + f.Name;
+ string deleteOldVersionId = null;
+
+ // try to detect an existing remote file of the same name
+ try {
+ var metadata = await _client.GetObjectMetadataAsync(_options.bucket, key);
+ var md5 = GetFileMD5Checksum(f.FullName);
+ var stored = metadata?.ETag?.Trim().Trim('"');
+
+ if (stored != null) {
+ if (stored.Equals(md5, StringComparison.InvariantCultureIgnoreCase)) {
+ Log.Info($"Upload file '{f.Name}' skipped (already exists in remote)");
+ return;
+ } else if (overwriteRemote) {
+ Log.Info($"File '{f.Name}' exists in remote, replacing...");
+ deleteOldVersionId = metadata.VersionId;
+ } else {
+ Log.Warn($"File '{f.Name}' exists in remote and checksum does not match local file. Use 'overwrite' argument to replace remote file.");
+ return;
}
- } catch (AmazonS3Exception ex) when (ex.StatusCode == HttpStatusCode.NotFound) {
- // we don't care if the file does not exist, we're uploading!
}
+ } catch {
+ // don't care if this check fails. worst case, we end up re-uploading a file that
+ // already exists. storage providers should prefer the newer file of the same name.
+ }
+
+ var req = new PutObjectRequest {
+ BucketName = _options.bucket,
+ FilePath = f.FullName,
+ Key = key,
+ };
- var req = new PutObjectRequest {
- BucketName = _options.bucket,
- FilePath = f.FullName,
- Key = key,
- };
+ await RetryAsync(() => _client.PutObjectAsync(req), "Uploading " + f.Name);
- Log.Info("Uploading " + f.Name);
- var resp = await _client.PutObjectAsync(req);
- if ((int) resp.HttpStatusCode >= 300 || (int) resp.HttpStatusCode < 200)
- throw new Exception("Failed to upload with status code " + resp.HttpStatusCode);
+ if (deleteOldVersionId != null) {
+ await RetryAsync(() => _client.DeleteObjectAsync(_options.bucket, key, deleteOldVersionId),
+ "Removing old version of " + f.Name,
+ throwIfFail: false);
+ }
+ }
- if (deleteOldVersionId != null) {
- Log.Info("Deleting old version of " + f.Name);
- await _client.DeleteObjectAsync(_options.bucket, key, deleteOldVersionId);
+ private async Task RetryAsync(Func<Task> block, string message, bool throwIfFail = true, bool showMessageFirst = true)
+ {
+ int ctry = 0;
+ while (true) {
+ try {
+ if (showMessageFirst || ctry > 0)
+ Log.Info((ctry > 0 ? $"(retry {ctry}) " : "") + message);
+ await block().ConfigureAwait(false);
+ return;
+ } catch (Exception ex) {
+ if (ctry++ > 2) {
+ if (throwIfFail) throw;
+ else return;
+ }
+ Log.Error($"Error: {ex.Message}, retrying in 1 second.");
+ await Task.Delay(1000).ConfigureAwait(false);
}
}
}
diff --git a/src/SquirrelCli/ValidatedOptionSet.cs b/src/SquirrelCli/ValidatedOptionSet.cs
index e71e5dad5..b73c1471a 100644
--- a/src/SquirrelCli/ValidatedOptionSet.cs
+++ b/src/SquirrelCli/ValidatedOptionSet.cs
@@ -106,6 +106,14 @@ protected virtual void IsValidUrl(string propertyName)
throw new OptionValidationException(propertyName, "Must start with http or https and be a valid URI.");
}
+ protected virtual int ParseIntArg(string propertyName, string propertyValue)
+ {
+ if (int.TryParse(propertyValue, out var value))
+ return value;
+
+ throw new OptionValidationException(propertyName, "Must be a valid integer.");
+ }
+
public abstract void Validate();
public virtual void WriteOptionDescriptions()