diff --git a/agent/client/client.go b/agent/client/client.go index 55575a1fb2e..0c937df6722 100644 --- a/agent/client/client.go +++ b/agent/client/client.go @@ -604,7 +604,7 @@ func (c *Client) handleStartJobRequest(p *agentv1.StartJobRequest) error { Port: int(j.MysqlBackup.Port), Socket: j.MysqlBackup.Socket, } - job = jobs.NewMySQLBackupJob(p.JobId, timeout, j.MysqlBackup.Name, dbConnCfg, locationConfig, j.MysqlBackup.Folder) + job = jobs.NewMySQLBackupJob(p.JobId, timeout, j.MysqlBackup.Name, dbConnCfg, locationConfig, j.MysqlBackup.Folder, j.MysqlBackup.Compression) case *agentv1.StartJobRequest_MysqlRestoreBackup: var locationConfig jobs.BackupLocationConfig @@ -622,7 +622,7 @@ func (c *Client) handleStartJobRequest(p *agentv1.StartJobRequest) error { return errors.Errorf("unknown location config: %T", j.MysqlRestoreBackup.LocationConfig) } - job = jobs.NewMySQLRestoreJob(p.JobId, timeout, j.MysqlRestoreBackup.Name, locationConfig, j.MysqlRestoreBackup.Folder) + job = jobs.NewMySQLRestoreJob(p.JobId, timeout, j.MysqlRestoreBackup.Name, locationConfig, j.MysqlRestoreBackup.Folder, j.MysqlRestoreBackup.Compression) case *agentv1.StartJobRequest_MongodbBackup: var locationConfig jobs.BackupLocationConfig @@ -651,7 +651,7 @@ func (c *Client) handleStartJobRequest(p *agentv1.StartJobRequest) error { } job, err = jobs.NewMongoDBBackupJob(p.JobId, timeout, j.MongodbBackup.Name, dsn, locationConfig, - j.MongodbBackup.EnablePitr, j.MongodbBackup.DataModel, j.MongodbBackup.Folder) + j.MongodbBackup.EnablePitr, j.MongodbBackup.DataModel, j.MongodbBackup.Folder, j.MongodbBackup.Compression) if err != nil { return err } @@ -684,7 +684,7 @@ func (c *Client) handleStartJobRequest(p *agentv1.StartJobRequest) error { job = jobs.NewMongoDBRestoreJob(p.JobId, timeout, j.MongodbRestoreBackup.Name, j.MongodbRestoreBackup.PitrTimestamp.AsTime(), dsn, locationConfig, - c.supervisor, j.MongodbRestoreBackup.Folder, j.MongodbRestoreBackup.PbmMetadata.Name) + c.supervisor, j.MongodbRestoreBackup.Folder, j.MongodbRestoreBackup.PbmMetadata.Name, j.MongodbRestoreBackup.Compression) default: return errors.Errorf("unknown job type: %T", j) } diff --git a/agent/runner/jobs/mongodb_backup_job.go b/agent/runner/jobs/mongodb_backup_job.go index b4ac5edd837..5d9f4f7f643 100644 --- a/agent/runner/jobs/mongodb_backup_job.go +++ b/agent/runner/jobs/mongodb_backup_job.go @@ -50,6 +50,7 @@ type MongoDBBackupJob struct { dataModel backuppb.DataModel jobLogger *pbmJobLogger folder string + compression backuppb.BackupCompression } // NewMongoDBBackupJob creates new Job for MongoDB backup. 
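Reviewer sketch, not part of the patch: the hunks above thread a new compression argument from agent/client/client.go into each backup/restore job constructor, and the hunks that follow add the matching field and pbm flag mapping in mongodb_backup_job.go. A minimal Go illustration of how a caller would populate the new field on a MongoDB backup job message, using only types and fields shown in this diff; the package aliases mirror the imports in agent/client/client.go and the jobs package, and the DSN and names are placeholders.

package example

import (
	agentv1 "github.com/percona/pmm/api/agent/v1"
	backuppb "github.com/percona/pmm/api/backup/v1"
)

// exampleMongoDBBackupMessage builds the job payload that
// handleStartJobRequest (agent/client/client.go above) unpacks and
// passes to jobs.NewMongoDBBackupJob.
func exampleMongoDBBackupMessage() *agentv1.StartJobRequest_MongoDBBackup {
	return &agentv1.StartJobRequest_MongoDBBackup{
		Dsn:        "mongodb://127.0.0.1:27017", // placeholder DSN
		Name:       "nightly",                   // placeholder backup name
		Folder:     "artifact_folder",
		EnablePitr: false,
		DataModel:  backuppb.DataModel_DATA_MODEL_LOGICAL,
		// New field in this patch; MongoDBBackupJob.startBackup maps it
		// to `pbm backup --compression=zstd` (see the switch below).
		Compression: backuppb.BackupCompression_BACKUP_COMPRESSION_ZSTD,
	}
}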
@@ -62,6 +63,7 @@ func NewMongoDBBackupJob( pitr bool, dataModel backuppb.DataModel, folder string, + compression backuppb.BackupCompression, ) (*MongoDBBackupJob, error) { if dataModel != backuppb.DataModel_DATA_MODEL_PHYSICAL && dataModel != backuppb.DataModel_DATA_MODEL_LOGICAL { return nil, errors.Errorf("'%s' is not a supported data model for MongoDB backups", dataModel) @@ -81,6 +83,7 @@ func NewMongoDBBackupJob( dataModel: dataModel, jobLogger: newPbmJobLogger(id, pbmBackupJob, dsn), folder: folder, + compression: compression, }, nil } @@ -216,6 +219,26 @@ func (j *MongoDBBackupJob) startBackup(ctx context.Context) (*pbmBackup, error) return nil, errors.Errorf("'%s' is not a supported data model for backups", j.dataModel) } + switch j.compression { + case backuppb.BackupCompression_BACKUP_COMPRESSION_DEFAULT: + case backuppb.BackupCompression_BACKUP_COMPRESSION_GZIP: + pbmArgs = append(pbmArgs, "--compression=gzip") + case backuppb.BackupCompression_BACKUP_COMPRESSION_SNAPPY: + pbmArgs = append(pbmArgs, "--compression=snappy") + case backuppb.BackupCompression_BACKUP_COMPRESSION_LZ4: + pbmArgs = append(pbmArgs, "--compression=lz4") + case backuppb.BackupCompression_BACKUP_COMPRESSION_S2: + pbmArgs = append(pbmArgs, "--compression=s2") + case backuppb.BackupCompression_BACKUP_COMPRESSION_PGZIP: + pbmArgs = append(pbmArgs, "--compression=pgzip") + case backuppb.BackupCompression_BACKUP_COMPRESSION_ZSTD: + pbmArgs = append(pbmArgs, "--compression=zstd") + case backuppb.BackupCompression_BACKUP_COMPRESSION_NONE: + pbmArgs = append(pbmArgs, "--compression=none") + default: + return nil, errors.Errorf("unknown compression: %s", j.compression) + } + if err := execPBMCommand(ctx, j.dsn, &result, pbmArgs...); err != nil { return nil, err } diff --git a/agent/runner/jobs/mongodb_backup_job_test.go b/agent/runner/jobs/mongodb_backup_job_test.go index 8f65c595f6f..146e3935bec 100644 --- a/agent/runner/jobs/mongodb_backup_job_test.go +++ b/agent/runner/jobs/mongodb_backup_job_test.go @@ -81,10 +81,11 @@ func TestNewMongoDBBackupJob(t *testing.T) { testJobDuration := 1 * time.Second tests := []struct { - name string - dataModel backuppb.DataModel - pitr bool - errMsg string + name string + dataModel backuppb.DataModel + pitr bool + errMsg string + compression backuppb.BackupCompression }{ { name: "logical backup model", @@ -107,12 +108,36 @@ func TestNewMongoDBBackupJob(t *testing.T) { dataModel: backuppb.DataModel_DATA_MODEL_PHYSICAL, errMsg: "PITR is only supported for logical backups", }, + { + name: "logical backup with LZ4 compression", + dataModel: backuppb.DataModel_DATA_MODEL_LOGICAL, + errMsg: "", + compression: backuppb.BackupCompression_BACKUP_COMPRESSION_LZ4, + }, + { + name: "physical backup with ZSTD compression", + dataModel: backuppb.DataModel_DATA_MODEL_PHYSICAL, + errMsg: "", + compression: backuppb.BackupCompression_BACKUP_COMPRESSION_ZSTD, + }, + { + name: "logical backup with PGZIP compression", + dataModel: backuppb.DataModel_DATA_MODEL_LOGICAL, + errMsg: "", + compression: backuppb.BackupCompression_BACKUP_COMPRESSION_PGZIP, + }, + { + name: "physical backup with no compression", + dataModel: backuppb.DataModel_DATA_MODEL_PHYSICAL, + errMsg: "", + compression: backuppb.BackupCompression_BACKUP_COMPRESSION_NONE, + }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { t.Parallel() - _, err := NewMongoDBBackupJob(t.Name(), testJobDuration, t.Name(), "", BackupLocationConfig{}, tc.pitr, tc.dataModel, "artifact_folder") + _, err := NewMongoDBBackupJob(t.Name(), 
testJobDuration, t.Name(), "", BackupLocationConfig{}, tc.pitr, tc.dataModel, "artifact_folder", tc.compression) if tc.errMsg == "" { assert.NoError(t, err) } else { @@ -121,3 +146,76 @@ func TestNewMongoDBBackupJob(t *testing.T) { }) } } + +func TestMongoDBBackupJobCompression(t *testing.T) { + t.Parallel() + testJobDuration := 1 * time.Second + + tests := []struct { + name string + compression backuppb.BackupCompression + shouldError bool + }{ + { + name: "GZIP compression", + compression: backuppb.BackupCompression_BACKUP_COMPRESSION_GZIP, + shouldError: false, + }, + { + name: "Snappy compression", + compression: backuppb.BackupCompression_BACKUP_COMPRESSION_SNAPPY, + shouldError: false, + }, + { + name: "LZ4 compression", + compression: backuppb.BackupCompression_BACKUP_COMPRESSION_LZ4, + shouldError: false, + }, + { + name: "S2 compression", + compression: backuppb.BackupCompression_BACKUP_COMPRESSION_S2, + shouldError: false, + }, + { + name: "PGZIP compression", + compression: backuppb.BackupCompression_BACKUP_COMPRESSION_PGZIP, + shouldError: false, + }, + { + name: "ZSTD compression", + compression: backuppb.BackupCompression_BACKUP_COMPRESSION_ZSTD, + shouldError: false, + }, + { + name: "None compression", + compression: backuppb.BackupCompression_BACKUP_COMPRESSION_NONE, + shouldError: false, + }, + } + + for _, tc := range tests { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + job, err := NewMongoDBBackupJob( + t.Name(), + testJobDuration, + t.Name(), + "", + BackupLocationConfig{}, + false, + backuppb.DataModel_DATA_MODEL_LOGICAL, + "artifact_folder", + tc.compression) + + if tc.shouldError { + assert.Error(t, err) + assert.Nil(t, job) + } else { + assert.NoError(t, err) + assert.NotNil(t, job) + assert.Equal(t, tc.compression, job.compression) + } + }) + } +} diff --git a/agent/runner/jobs/mongodb_restore_job.go b/agent/runner/jobs/mongodb_restore_job.go index fc2514df1c9..54f7fee2e9b 100644 --- a/agent/runner/jobs/mongodb_restore_job.go +++ b/agent/runner/jobs/mongodb_restore_job.go @@ -28,6 +28,7 @@ import ( "google.golang.org/protobuf/types/known/timestamppb" agentv1 "github.com/percona/pmm/api/agent/v1" + backupv1 "github.com/percona/pmm/api/backup/v1" ) const ( @@ -48,6 +49,7 @@ type MongoDBRestoreJob struct { jobLogger *pbmJobLogger folder string pbmBackupName string + compression backupv1.BackupCompression } // NewMongoDBRestoreJob creates new Job for MongoDB backup restore. @@ -61,6 +63,7 @@ func NewMongoDBRestoreJob( restarter agentsRestarter, folder string, pbmBackupName string, + compression backupv1.BackupCompression, ) *MongoDBRestoreJob { return &MongoDBRestoreJob{ id: id, @@ -74,6 +77,7 @@ func NewMongoDBRestoreJob( jobLogger: newPbmJobLogger(id, pbmRestoreJob, dbConfig), folder: folder, pbmBackupName: pbmBackupName, + compression: compression, } } diff --git a/agent/runner/jobs/mysql_backup_job.go b/agent/runner/jobs/mysql_backup_job.go index c5a24605368..1c8a5a95d3b 100644 --- a/agent/runner/jobs/mysql_backup_job.go +++ b/agent/runner/jobs/mysql_backup_job.go @@ -46,10 +46,19 @@ type MySQLBackupJob struct { connConf DBConnConfig locationConfig BackupLocationConfig folder string + compression backuppb.BackupCompression } // NewMySQLBackupJob constructs new Job for MySQL backup. 
-func NewMySQLBackupJob(id string, timeout time.Duration, name string, connConf DBConnConfig, locationConfig BackupLocationConfig, folder string) *MySQLBackupJob { +func NewMySQLBackupJob( + id string, + timeout time.Duration, + name string, + connConf DBConnConfig, + locationConfig BackupLocationConfig, + folder string, + compression backuppb.BackupCompression, +) *MySQLBackupJob { return &MySQLBackupJob{ id: id, timeout: timeout, @@ -58,6 +67,7 @@ func NewMySQLBackupJob(id string, timeout time.Duration, name string, connConf D connConf: connConf, locationConfig: locationConfig, folder: folder, + compression: compression, } } @@ -119,8 +129,10 @@ func (j *MySQLBackupJob) binariesInstalled() error { return errors.Wrapf(err, "lookpath: %s", xtrabackupBin) } - if _, err := exec.LookPath(qpressBin); err != nil { - return errors.Wrapf(err, "lookpath: %s", qpressBin) + if j.compression == backuppb.BackupCompression_BACKUP_COMPRESSION_QUICKLZ { + if _, err := exec.LookPath(qpressBin); err != nil { + return errors.Wrapf(err, "lookpath: %s", qpressBin) + } } if j.locationConfig.Type == S3BackupLocationType { @@ -149,12 +161,25 @@ func (j *MySQLBackupJob) backup(ctx context.Context) (rerr error) { xtrabackupCmd := exec.CommandContext(pipeCtx, xtrabackupBin, - "--compress", "--backup", // Target dir is created, even though it's empty, because we are streaming it to cloud. // https://jira.percona.com/browse/PXB-2602 "--target-dir="+tmpDir) // #nosec G204 + switch j.compression { + case backuppb.BackupCompression_BACKUP_COMPRESSION_DEFAULT: + xtrabackupCmd.Args = append(xtrabackupCmd.Args, "--compress") + case backuppb.BackupCompression_BACKUP_COMPRESSION_QUICKLZ: + xtrabackupCmd.Args = append(xtrabackupCmd.Args, "--compress=quicklz") + case backuppb.BackupCompression_BACKUP_COMPRESSION_ZSTD: + xtrabackupCmd.Args = append(xtrabackupCmd.Args, "--compress=zstd") + case backuppb.BackupCompression_BACKUP_COMPRESSION_LZ4: + xtrabackupCmd.Args = append(xtrabackupCmd.Args, "--compress=lz4") + case backuppb.BackupCompression_BACKUP_COMPRESSION_NONE: + default: + return errors.Errorf("unknown compression: %s", j.compression) + } + if j.connConf.User != "" { xtrabackupCmd.Args = append(xtrabackupCmd.Args, "--user="+j.connConf.User) xtrabackupCmd.Args = append(xtrabackupCmd.Args, "--password="+j.connConf.Password) diff --git a/agent/runner/jobs/mysql_restore_job.go b/agent/runner/jobs/mysql_restore_job.go index 72dae2f45fc..71a5c19c5ec 100644 --- a/agent/runner/jobs/mysql_restore_job.go +++ b/agent/runner/jobs/mysql_restore_job.go @@ -32,6 +32,7 @@ import ( "google.golang.org/protobuf/types/known/timestamppb" agentv1 "github.com/percona/pmm/api/agent/v1" + backupv1 "github.com/percona/pmm/api/backup/v1" ) const ( @@ -53,10 +54,18 @@ type MySQLRestoreJob struct { name string locationConfig BackupLocationConfig folder string + compression backupv1.BackupCompression } // NewMySQLRestoreJob constructs new Job for MySQL backup restore. 
-func NewMySQLRestoreJob(id string, timeout time.Duration, name string, locationConfig BackupLocationConfig, folder string) *MySQLRestoreJob { +func NewMySQLRestoreJob( + id string, + timeout time.Duration, + name string, + locationConfig BackupLocationConfig, + folder string, + compression backupv1.BackupCompression, +) *MySQLRestoreJob { return &MySQLRestoreJob{ id: id, timeout: timeout, @@ -64,6 +73,7 @@ func NewMySQLRestoreJob(id string, timeout time.Duration, name string, locationC name: name, locationConfig: locationConfig, folder: folder, + compression: compression, } } @@ -131,7 +141,7 @@ func (j *MySQLRestoreJob) Run(ctx context.Context, send Send) error { } } - if err := restoreBackup(ctx, tmpDir, mySQLDirectory); err != nil { + if err := restoreBackup(ctx, tmpDir, mySQLDirectory, j.compression); err != nil { return errors.WithStack(err) } @@ -163,8 +173,10 @@ func (j *MySQLRestoreJob) binariesInstalled() error { return errors.Wrapf(err, "lookpath: %s", xbstreamBin) } - if _, err := exec.LookPath(qpressBin); err != nil { - return errors.Wrapf(err, "lookpath: %s", qpressBin) + if j.compression == backupv1.BackupCompression_BACKUP_COMPRESSION_QUICKLZ { + if _, err := exec.LookPath(qpressBin); err != nil { + return errors.Wrapf(err, "lookpath: %s", qpressBin) + } } return nil @@ -370,17 +382,19 @@ func getPermissions(path string) (os.FileMode, error) { return info.Mode(), nil } -func restoreBackup(ctx context.Context, backupDirectory, mySQLDirectory string) error { +func restoreBackup(ctx context.Context, backupDirectory, mySQLDirectory string, compression backupv1.BackupCompression) error { // TODO We should implement recognizing correct default permissions based on DB configuration. // Setting default value in case the base MySQL folder have been lost. mysqlDirPermissions := os.FileMode(0o750) - if output, err := exec.CommandContext( //nolint:gosec - ctx, - xtrabackupBin, - "--decompress", - "--target-dir="+backupDirectory).CombinedOutput(); err != nil { - return errors.Wrapf(err, "failed to decompress, output: %s", string(output)) + if compression != backupv1.BackupCompression_BACKUP_COMPRESSION_NONE { + if output, err := exec.CommandContext( //nolint:gosec + ctx, + xtrabackupBin, + "--decompress", + "--target-dir="+backupDirectory).CombinedOutput(); err != nil { + return errors.Wrapf(err, "failed to decompress, output: %s", string(output)) + } } if output, err := exec.CommandContext( //nolint:gosec diff --git a/api/agent/v1/agent.pb.go b/api/agent/v1/agent.pb.go index e6e49e6c279..c781c37b781 100644 --- a/api/agent/v1/agent.pb.go +++ b/api/agent/v1/agent.pb.go @@ -5309,6 +5309,8 @@ type StartJobRequest_MySQLBackup struct { Name string `protobuf:"bytes,6,opt,name=name,proto3" json:"name,omitempty"` // Folder to store artifact on a storage. Folder string `protobuf:"bytes,7,opt,name=folder,proto3" json:"folder,omitempty"` + // Compression + Compression v11.BackupCompression `protobuf:"varint,8,opt,name=compression,proto3,enum=backup.v1.BackupCompression" json:"compression,omitempty"` // Backup target location. 
// // Types that are valid to be assigned to LocationConfig: @@ -5398,6 +5400,13 @@ func (x *StartJobRequest_MySQLBackup) GetFolder() string { return "" } +func (x *StartJobRequest_MySQLBackup) GetCompression() v11.BackupCompression { + if x != nil { + return x.Compression + } + return v11.BackupCompression(0) +} + func (x *StartJobRequest_MySQLBackup) GetLocationConfig() isStartJobRequest_MySQLBackup_LocationConfig { if x != nil { return x.LocationConfig @@ -5433,6 +5442,8 @@ type StartJobRequest_MySQLRestoreBackup struct { Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` // Folder to store artifact on a storage. Folder string `protobuf:"bytes,3,opt,name=folder,proto3" json:"folder,omitempty"` + // Compression + Compression v11.BackupCompression `protobuf:"varint,4,opt,name=compression,proto3,enum=backup.v1.BackupCompression" json:"compression,omitempty"` // Where backup is stored. // // Types that are valid to be assigned to LocationConfig: @@ -5494,6 +5505,13 @@ func (x *StartJobRequest_MySQLRestoreBackup) GetFolder() string { return "" } +func (x *StartJobRequest_MySQLRestoreBackup) GetCompression() v11.BackupCompression { + if x != nil { + return x.Compression + } + return v11.BackupCompression(0) +} + func (x *StartJobRequest_MySQLRestoreBackup) GetLocationConfig() isStartJobRequest_MySQLRestoreBackup_LocationConfig { if x != nil { return x.LocationConfig @@ -5537,6 +5555,8 @@ type StartJobRequest_MongoDBBackup struct { EnablePitr bool `protobuf:"varint,5,opt,name=enable_pitr,json=enablePitr,proto3" json:"enable_pitr,omitempty"` // Backup data model (physical or logical). DataModel v11.DataModel `protobuf:"varint,6,opt,name=data_model,json=dataModel,proto3,enum=backup.v1.DataModel" json:"data_model,omitempty"` + // Compression + Compression v11.BackupCompression `protobuf:"varint,7,opt,name=compression,proto3,enum=backup.v1.BackupCompression" json:"compression,omitempty"` // Backup target location. // // Types that are valid to be assigned to LocationConfig: @@ -5620,6 +5640,13 @@ func (x *StartJobRequest_MongoDBBackup) GetDataModel() v11.DataModel { return v11.DataModel(0) } +func (x *StartJobRequest_MongoDBBackup) GetCompression() v11.BackupCompression { + if x != nil { + return x.Compression + } + return v11.BackupCompression(0) +} + func (x *StartJobRequest_MongoDBBackup) GetLocationConfig() isStartJobRequest_MongoDBBackup_LocationConfig { if x != nil { return x.LocationConfig @@ -5678,6 +5705,8 @@ type StartJobRequest_MongoDBRestoreBackup struct { PbmMetadata *v11.PbmMetadata `protobuf:"bytes,5,opt,name=pbm_metadata,json=pbmMetadata,proto3" json:"pbm_metadata,omitempty"` // Point-in-Time recovery timestamp. PitrTimestamp *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=pitr_timestamp,json=pitrTimestamp,proto3" json:"pitr_timestamp,omitempty"` + // Compression + Compression v11.BackupCompression `protobuf:"varint,7,opt,name=compression,proto3,enum=backup.v1.BackupCompression" json:"compression,omitempty"` // Where backup is stored. 
// // Types that are valid to be assigned to LocationConfig: @@ -5761,6 +5790,13 @@ func (x *StartJobRequest_MongoDBRestoreBackup) GetPitrTimestamp() *timestamppb.T return nil } +func (x *StartJobRequest_MongoDBRestoreBackup) GetCompression() v11.BackupCompression { + if x != nil { + return x.Compression + } + return v11.BackupCompression(0) +} + func (x *StartJobRequest_MongoDBRestoreBackup) GetLocationConfig() isStartJobRequest_MongoDBRestoreBackup_LocationConfig { if x != nil { return x.LocationConfig @@ -6860,14 +6896,14 @@ const file_agent_v1_agent_proto_rawDesc = "" + "bucketName\x12#\n" + "\rbucket_region\x18\x05 \x01(\tR\fbucketRegion\".\n" + "\x18FilesystemLocationConfig\x12\x12\n" + - "\x04path\x18\x01 \x01(\tR\x04path\"\xd3\r\n" + + "\x04path\x18\x01 \x01(\tR\x04path\"\xd3\x0f\n" + "\x0fStartJobRequest\x12\x15\n" + "\x06job_id\x18\x01 \x01(\tR\x05jobId\x123\n" + "\atimeout\x18\x02 \x01(\v2\x19.google.protobuf.DurationR\atimeout\x12J\n" + "\fmysql_backup\x18\v \x01(\v2%.agent.v1.StartJobRequest.MySQLBackupH\x00R\vmysqlBackup\x12`\n" + "\x14mysql_restore_backup\x18\f \x01(\v2,.agent.v1.StartJobRequest.MySQLRestoreBackupH\x00R\x12mysqlRestoreBackup\x12P\n" + "\x0emongodb_backup\x18\r \x01(\v2'.agent.v1.StartJobRequest.MongoDBBackupH\x00R\rmongodbBackup\x12f\n" + - "\x16mongodb_restore_backup\x18\x0e \x01(\v2..agent.v1.StartJobRequest.MongoDBRestoreBackupH\x00R\x14mongodbRestoreBackup\x1a\x96\x02\n" + + "\x16mongodb_restore_backup\x18\x0e \x01(\v2..agent.v1.StartJobRequest.MongoDBRestoreBackupH\x00R\x14mongodbRestoreBackup\x1a\xd6\x02\n" + "\vMySQLBackup\x12\x12\n" + "\x04user\x18\x01 \x01(\tR\x04user\x12\x1a\n" + "\bpassword\x18\x02 \x01(\tR\bpassword\x12\x18\n" + @@ -6875,18 +6911,20 @@ const file_agent_v1_agent_proto_rawDesc = "" + "\x04port\x18\x04 \x01(\x05R\x04port\x12\x16\n" + "\x06socket\x18\x05 \x01(\tR\x06socket\x12\x12\n" + "\x04name\x18\x06 \x01(\tR\x04name\x12\x16\n" + - "\x06folder\x18\a \x01(\tR\x06folder\x129\n" + + "\x06folder\x18\a \x01(\tR\x06folder\x12>\n" + + "\vcompression\x18\b \x01(\x0e2\x1c.backup.v1.BackupCompressionR\vcompression\x129\n" + "\ts3_config\x18\n" + " \x01(\v2\x1a.agent.v1.S3LocationConfigH\x00R\bs3ConfigB\x11\n" + - "\x0flocation_configJ\x04\b\v\x10\fR\x11filesystem_config\x1a\xc6\x01\n" + + "\x0flocation_configJ\x04\b\v\x10\fR\x11filesystem_config\x1a\x86\x02\n" + "\x12MySQLRestoreBackup\x12\x1d\n" + "\n" + "service_id\x18\x01 \x01(\tR\tserviceId\x12\x12\n" + "\x04name\x18\x02 \x01(\tR\x04name\x12\x16\n" + - "\x06folder\x18\x03 \x01(\tR\x06folder\x129\n" + + "\x06folder\x18\x03 \x01(\tR\x06folder\x12>\n" + + "\vcompression\x18\x04 \x01(\x0e2\x1c.backup.v1.BackupCompressionR\vcompression\x129\n" + "\ts3_config\x18\n" + " \x01(\v2\x1a.agent.v1.S3LocationConfigH\x00R\bs3ConfigB\x11\n" + - "\x0flocation_configJ\x04\b\v\x10\fR\x11filesystem_config\x1a\xf8\x02\n" + + "\x0flocation_configJ\x04\b\v\x10\fR\x11filesystem_config\x1a\xb8\x03\n" + "\rMongoDBBackup\x12\x10\n" + "\x03dsn\x18\x01 \x01(\tR\x03dsn\x122\n" + "\n" + @@ -6896,11 +6934,12 @@ const file_agent_v1_agent_proto_rawDesc = "" + "\venable_pitr\x18\x05 \x01(\bR\n" + "enablePitr\x123\n" + "\n" + - "data_model\x18\x06 \x01(\x0e2\x14.backup.v1.DataModelR\tdataModel\x129\n" + + "data_model\x18\x06 \x01(\x0e2\x14.backup.v1.DataModelR\tdataModel\x12>\n" + + "\vcompression\x18\a \x01(\x0e2\x1c.backup.v1.BackupCompressionR\vcompression\x129\n" + "\ts3_config\x18\n" + " \x01(\v2\x1a.agent.v1.S3LocationConfigH\x00R\bs3Config\x12Q\n" + "\x11filesystem_config\x18\v 
\x01(\v2\".agent.v1.FilesystemLocationConfigH\x00R\x10filesystemConfigB\x11\n" + - "\x0flocation_config\x1a\xa7\x03\n" + + "\x0flocation_config\x1a\xe7\x03\n" + "\x14MongoDBRestoreBackup\x12\x10\n" + "\x03dsn\x18\x01 \x01(\tR\x03dsn\x122\n" + "\n" + @@ -6908,7 +6947,8 @@ const file_agent_v1_agent_proto_rawDesc = "" + "\x04name\x18\x03 \x01(\tR\x04name\x12\x16\n" + "\x06folder\x18\x04 \x01(\tR\x06folder\x129\n" + "\fpbm_metadata\x18\x05 \x01(\v2\x16.backup.v1.PbmMetadataR\vpbmMetadata\x12A\n" + - "\x0epitr_timestamp\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampR\rpitrTimestamp\x129\n" + + "\x0epitr_timestamp\x18\x06 \x01(\v2\x1a.google.protobuf.TimestampR\rpitrTimestamp\x12>\n" + + "\vcompression\x18\a \x01(\x0e2\x1c.backup.v1.BackupCompressionR\vcompression\x129\n" + "\ts3_config\x18\n" + " \x01(\v2\x1a.agent.v1.S3LocationConfigH\x00R\bs3Config\x12Q\n" + "\x11filesystem_config\x18\v \x01(\v2\".agent.v1.FilesystemLocationConfigH\x00R\x10filesystemConfigB\x11\n" + @@ -7155,9 +7195,10 @@ var ( (v1.ServiceType)(0), // 97: inventory.v1.ServiceType (*status.Status)(nil), // 98: google.rpc.Status (v1.AgentType)(0), // 99: inventory.v1.AgentType - (v11.DataModel)(0), // 100: backup.v1.DataModel - (*v11.PbmMetadata)(nil), // 101: backup.v1.PbmMetadata - (*v11.Metadata)(nil), // 102: backup.v1.Metadata + (v11.BackupCompression)(0), // 100: backup.v1.BackupCompression + (v11.DataModel)(0), // 101: backup.v1.DataModel + (*v11.PbmMetadata)(nil), // 102: backup.v1.PbmMetadata + (*v11.Metadata)(nil), // 103: backup.v1.Metadata } ) @@ -7283,32 +7324,36 @@ var file_agent_v1_agent_proto_depIdxs = []int32{ 2, // 118: agent.v1.StartActionRequest.MongoDBQueryReplSetGetStatusParams.text_files:type_name -> agent.v1.TextFiles 2, // 119: agent.v1.StartActionRequest.MongoDBQueryGetDiagnosticDataParams.text_files:type_name -> agent.v1.TextFiles 1, // 120: agent.v1.StartActionRequest.RestartSystemServiceParams.system_service:type_name -> agent.v1.StartActionRequest.RestartSystemServiceParams.SystemService - 32, // 121: agent.v1.StartJobRequest.MySQLBackup.s3_config:type_name -> agent.v1.S3LocationConfig - 32, // 122: agent.v1.StartJobRequest.MySQLRestoreBackup.s3_config:type_name -> agent.v1.S3LocationConfig - 2, // 123: agent.v1.StartJobRequest.MongoDBBackup.text_files:type_name -> agent.v1.TextFiles - 100, // 124: agent.v1.StartJobRequest.MongoDBBackup.data_model:type_name -> backup.v1.DataModel - 32, // 125: agent.v1.StartJobRequest.MongoDBBackup.s3_config:type_name -> agent.v1.S3LocationConfig - 33, // 126: agent.v1.StartJobRequest.MongoDBBackup.filesystem_config:type_name -> agent.v1.FilesystemLocationConfig - 2, // 127: agent.v1.StartJobRequest.MongoDBRestoreBackup.text_files:type_name -> agent.v1.TextFiles - 101, // 128: agent.v1.StartJobRequest.MongoDBRestoreBackup.pbm_metadata:type_name -> backup.v1.PbmMetadata - 93, // 129: agent.v1.StartJobRequest.MongoDBRestoreBackup.pitr_timestamp:type_name -> google.protobuf.Timestamp - 32, // 130: agent.v1.StartJobRequest.MongoDBRestoreBackup.s3_config:type_name -> agent.v1.S3LocationConfig - 33, // 131: agent.v1.StartJobRequest.MongoDBRestoreBackup.filesystem_config:type_name -> agent.v1.FilesystemLocationConfig - 102, // 132: agent.v1.JobResult.MongoDBBackup.metadata:type_name -> backup.v1.Metadata - 102, // 133: agent.v1.JobResult.MySQLBackup.metadata:type_name -> backup.v1.Metadata - 85, // 134: agent.v1.GetVersionsRequest.Software.mysqld:type_name -> agent.v1.GetVersionsRequest.MySQLd - 86, // 135: agent.v1.GetVersionsRequest.Software.xtrabackup:type_name -> 
agent.v1.GetVersionsRequest.Xtrabackup - 87, // 136: agent.v1.GetVersionsRequest.Software.xbcloud:type_name -> agent.v1.GetVersionsRequest.Xbcloud - 88, // 137: agent.v1.GetVersionsRequest.Software.qpress:type_name -> agent.v1.GetVersionsRequest.Qpress - 89, // 138: agent.v1.GetVersionsRequest.Software.mongod:type_name -> agent.v1.GetVersionsRequest.MongoDB - 90, // 139: agent.v1.GetVersionsRequest.Software.pbm:type_name -> agent.v1.GetVersionsRequest.PBM - 42, // 140: agent.v1.AgentService.Connect:input_type -> agent.v1.AgentMessage - 43, // 141: agent.v1.AgentService.Connect:output_type -> agent.v1.ServerMessage - 141, // [141:142] is the sub-list for method output_type - 140, // [140:141] is the sub-list for method input_type - 140, // [140:140] is the sub-list for extension type_name - 140, // [140:140] is the sub-list for extension extendee - 0, // [0:140] is the sub-list for field type_name + 100, // 121: agent.v1.StartJobRequest.MySQLBackup.compression:type_name -> backup.v1.BackupCompression + 32, // 122: agent.v1.StartJobRequest.MySQLBackup.s3_config:type_name -> agent.v1.S3LocationConfig + 100, // 123: agent.v1.StartJobRequest.MySQLRestoreBackup.compression:type_name -> backup.v1.BackupCompression + 32, // 124: agent.v1.StartJobRequest.MySQLRestoreBackup.s3_config:type_name -> agent.v1.S3LocationConfig + 2, // 125: agent.v1.StartJobRequest.MongoDBBackup.text_files:type_name -> agent.v1.TextFiles + 101, // 126: agent.v1.StartJobRequest.MongoDBBackup.data_model:type_name -> backup.v1.DataModel + 100, // 127: agent.v1.StartJobRequest.MongoDBBackup.compression:type_name -> backup.v1.BackupCompression + 32, // 128: agent.v1.StartJobRequest.MongoDBBackup.s3_config:type_name -> agent.v1.S3LocationConfig + 33, // 129: agent.v1.StartJobRequest.MongoDBBackup.filesystem_config:type_name -> agent.v1.FilesystemLocationConfig + 2, // 130: agent.v1.StartJobRequest.MongoDBRestoreBackup.text_files:type_name -> agent.v1.TextFiles + 102, // 131: agent.v1.StartJobRequest.MongoDBRestoreBackup.pbm_metadata:type_name -> backup.v1.PbmMetadata + 93, // 132: agent.v1.StartJobRequest.MongoDBRestoreBackup.pitr_timestamp:type_name -> google.protobuf.Timestamp + 100, // 133: agent.v1.StartJobRequest.MongoDBRestoreBackup.compression:type_name -> backup.v1.BackupCompression + 32, // 134: agent.v1.StartJobRequest.MongoDBRestoreBackup.s3_config:type_name -> agent.v1.S3LocationConfig + 33, // 135: agent.v1.StartJobRequest.MongoDBRestoreBackup.filesystem_config:type_name -> agent.v1.FilesystemLocationConfig + 103, // 136: agent.v1.JobResult.MongoDBBackup.metadata:type_name -> backup.v1.Metadata + 103, // 137: agent.v1.JobResult.MySQLBackup.metadata:type_name -> backup.v1.Metadata + 85, // 138: agent.v1.GetVersionsRequest.Software.mysqld:type_name -> agent.v1.GetVersionsRequest.MySQLd + 86, // 139: agent.v1.GetVersionsRequest.Software.xtrabackup:type_name -> agent.v1.GetVersionsRequest.Xtrabackup + 87, // 140: agent.v1.GetVersionsRequest.Software.xbcloud:type_name -> agent.v1.GetVersionsRequest.Xbcloud + 88, // 141: agent.v1.GetVersionsRequest.Software.qpress:type_name -> agent.v1.GetVersionsRequest.Qpress + 89, // 142: agent.v1.GetVersionsRequest.Software.mongod:type_name -> agent.v1.GetVersionsRequest.MongoDB + 90, // 143: agent.v1.GetVersionsRequest.Software.pbm:type_name -> agent.v1.GetVersionsRequest.PBM + 42, // 144: agent.v1.AgentService.Connect:input_type -> agent.v1.AgentMessage + 43, // 145: agent.v1.AgentService.Connect:output_type -> agent.v1.ServerMessage + 145, // [145:146] is the sub-list for method 
output_type + 144, // [144:145] is the sub-list for method input_type + 144, // [144:144] is the sub-list for extension type_name + 144, // [144:144] is the sub-list for extension extendee + 0, // [0:144] is the sub-list for field type_name } func init() { file_agent_v1_agent_proto_init() } diff --git a/api/agent/v1/agent.pb.validate.go b/api/agent/v1/agent.pb.validate.go index ed9da319c44..1f55b21ac4e 100644 --- a/api/agent/v1/agent.pb.validate.go +++ b/api/agent/v1/agent.pb.validate.go @@ -37,7 +37,7 @@ var ( _ = anypb.Any{} _ = sort.Sort - _ = backupv1.DataModel(0) + _ = backupv1.BackupCompression(0) _ = inventoryv1.AgentStatus(0) ) @@ -11345,6 +11345,8 @@ func (m *StartJobRequest_MySQLBackup) validate(all bool) error { // no validation rules for Folder + // no validation rules for Compression + switch v := m.LocationConfig.(type) { case *StartJobRequest_MySQLBackup_S3Config: if v == nil { @@ -11501,6 +11503,8 @@ func (m *StartJobRequest_MySQLRestoreBackup) validate(all bool) error { // no validation rules for Folder + // no validation rules for Compression + switch v := m.LocationConfig.(type) { case *StartJobRequest_MySQLRestoreBackup_S3Config: if v == nil { @@ -11690,6 +11694,8 @@ func (m *StartJobRequest_MongoDBBackup) validate(all bool) error { // no validation rules for DataModel + // no validation rules for Compression + switch v := m.LocationConfig.(type) { case *StartJobRequest_MongoDBBackup_S3Config: if v == nil { @@ -11974,6 +11980,8 @@ func (m *StartJobRequest_MongoDBRestoreBackup) validate(all bool) error { } } + // no validation rules for Compression + switch v := m.LocationConfig.(type) { case *StartJobRequest_MongoDBRestoreBackup_S3Config: if v == nil { diff --git a/api/agent/v1/agent.proto b/api/agent/v1/agent.proto index de206a8df16..71d1cce6f4c 100644 --- a/api/agent/v1/agent.proto +++ b/api/agent/v1/agent.proto @@ -524,6 +524,8 @@ message StartJobRequest { string name = 6; // Folder to store artifact on a storage. string folder = 7; + // Compression + backup.v1.BackupCompression compression = 8; // Backup target location. oneof location_config { S3LocationConfig s3_config = 10; @@ -541,6 +543,8 @@ message StartJobRequest { string name = 2; // Folder to store artifact on a storage. string folder = 3; + // Compression + backup.v1.BackupCompression compression = 4; // Where backup is stored. oneof location_config { S3LocationConfig s3_config = 10; @@ -561,6 +565,8 @@ message StartJobRequest { bool enable_pitr = 5; // Backup data model (physical or logical). backup.v1.DataModel data_model = 6; + // Compression + backup.v1.BackupCompression compression = 7; // Backup target location. oneof location_config { S3LocationConfig s3_config = 10; @@ -582,6 +588,8 @@ message StartJobRequest { backup.v1.PbmMetadata pbm_metadata = 5; // Point-in-Time recovery timestamp. google.protobuf.Timestamp pitr_timestamp = 6; + // Compression + backup.v1.BackupCompression compression = 7; // Where backup is stored. oneof location_config { S3LocationConfig s3_config = 10; diff --git a/api/backup/v1/artifacts.pb.go b/api/backup/v1/artifacts.pb.go index febec55858c..b9be37a2383 100644 --- a/api/backup/v1/artifacts.pb.go +++ b/api/backup/v1/artifacts.pb.go @@ -122,7 +122,9 @@ type Artifact struct { // Folder to store artifact on a storage. Folder string `protobuf:"bytes,13,opt,name=folder,proto3" json:"folder,omitempty"` // List of artifact metadata. 
- MetadataList []*Metadata `protobuf:"bytes,14,rep,name=metadata_list,json=metadataList,proto3" json:"metadata_list,omitempty"` + MetadataList []*Metadata `protobuf:"bytes,14,rep,name=metadata_list,json=metadataList,proto3" json:"metadata_list,omitempty"` + // Compression + Compression BackupCompression `protobuf:"varint,15,opt,name=compression,proto3,enum=backup.v1.BackupCompression" json:"compression,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -255,6 +257,13 @@ func (x *Artifact) GetMetadataList() []*Metadata { return nil } +func (x *Artifact) GetCompression() BackupCompression { + if x != nil { + return x.Compression + } + return BackupCompression_BACKUP_COMPRESSION_UNSPECIFIED +} + type ListArtifactsRequest struct { state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields @@ -572,7 +581,7 @@ var File_backup_v1_artifacts_proto protoreflect.FileDescriptor const file_backup_v1_artifacts_proto_rawDesc = "" + "\n" + - "\x19backup/v1/artifacts.proto\x12\tbackup.v1\x1a\x16backup/v1/common.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17validate/validate.proto\"\xab\x04\n" + + "\x19backup/v1/artifacts.proto\x12\tbackup.v1\x1a\x16backup/v1/common.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x17validate/validate.proto\"\xeb\x04\n" + "\bArtifact\x12\x1f\n" + "\vartifact_id\x18\x01 \x01(\tR\n" + "artifactId\x12\x12\n" + @@ -593,7 +602,8 @@ const file_backup_v1_artifacts_proto_rawDesc = "" + "\x04mode\x18\v \x01(\x0e2\x15.backup.v1.BackupModeR\x04mode\x12,\n" + "\x12is_sharded_cluster\x18\f \x01(\bR\x10isShardedCluster\x12\x16\n" + "\x06folder\x18\r \x01(\tR\x06folder\x128\n" + - "\rmetadata_list\x18\x0e \x03(\v2\x13.backup.v1.MetadataR\fmetadataList\"\x16\n" + + "\rmetadata_list\x18\x0e \x03(\v2\x13.backup.v1.MetadataR\fmetadataList\x12>\n" + + "\vcompression\x18\x0f \x01(\x0e2\x1c.backup.v1.BackupCompressionR\vcompression\"\x16\n" + "\x14ListArtifactsRequest\"J\n" + "\x15ListArtifactsResponse\x121\n" + "\tartifacts\x18\x01 \x03(\v2\x13.backup.v1.ArtifactR\tartifacts\"d\n" + @@ -654,6 +664,7 @@ var ( (*timestamppb.Timestamp)(nil), // 10: google.protobuf.Timestamp (BackupMode)(0), // 11: backup.v1.BackupMode (*Metadata)(nil), // 12: backup.v1.Metadata + (BackupCompression)(0), // 13: backup.v1.BackupCompression } ) @@ -663,15 +674,16 @@ var file_backup_v1_artifacts_proto_depIdxs = []int32{ 10, // 2: backup.v1.Artifact.created_at:type_name -> google.protobuf.Timestamp 11, // 3: backup.v1.Artifact.mode:type_name -> backup.v1.BackupMode 12, // 4: backup.v1.Artifact.metadata_list:type_name -> backup.v1.Metadata - 1, // 5: backup.v1.ListArtifactsResponse.artifacts:type_name -> backup.v1.Artifact - 10, // 6: backup.v1.PitrTimerange.start_timestamp:type_name -> google.protobuf.Timestamp - 10, // 7: backup.v1.PitrTimerange.end_timestamp:type_name -> google.protobuf.Timestamp - 6, // 8: backup.v1.ListPitrTimerangesResponse.timeranges:type_name -> backup.v1.PitrTimerange - 9, // [9:9] is the sub-list for method output_type - 9, // [9:9] is the sub-list for method input_type - 9, // [9:9] is the sub-list for extension type_name - 9, // [9:9] is the sub-list for extension extendee - 0, // [0:9] is the sub-list for field type_name + 13, // 5: backup.v1.Artifact.compression:type_name -> backup.v1.BackupCompression + 1, // 6: backup.v1.ListArtifactsResponse.artifacts:type_name -> backup.v1.Artifact + 10, // 7: backup.v1.PitrTimerange.start_timestamp:type_name -> google.protobuf.Timestamp + 10, // 8: 
backup.v1.PitrTimerange.end_timestamp:type_name -> google.protobuf.Timestamp + 6, // 9: backup.v1.ListPitrTimerangesResponse.timeranges:type_name -> backup.v1.PitrTimerange + 10, // [10:10] is the sub-list for method output_type + 10, // [10:10] is the sub-list for method input_type + 10, // [10:10] is the sub-list for extension type_name + 10, // [10:10] is the sub-list for extension extendee + 0, // [0:10] is the sub-list for field type_name } func init() { file_backup_v1_artifacts_proto_init() } diff --git a/api/backup/v1/artifacts.pb.validate.go b/api/backup/v1/artifacts.pb.validate.go index 4b5be980436..28c9b558dec 100644 --- a/api/backup/v1/artifacts.pb.validate.go +++ b/api/backup/v1/artifacts.pb.validate.go @@ -144,6 +144,8 @@ func (m *Artifact) validate(all bool) error { } + // no validation rules for Compression + if len(errors) > 0 { return ArtifactMultiError(errors) } diff --git a/api/backup/v1/artifacts.proto b/api/backup/v1/artifacts.proto index 70e829904ea..d315db8109f 100644 --- a/api/backup/v1/artifacts.proto +++ b/api/backup/v1/artifacts.proto @@ -49,6 +49,8 @@ message Artifact { string folder = 13; // List of artifact metadata. repeated backup.v1.Metadata metadata_list = 14; + // Compression + BackupCompression compression = 15; } message ListArtifactsRequest {} diff --git a/api/backup/v1/backup.pb.go b/api/backup/v1/backup.pb.go index 14ff8f92f1c..496f6ffaa30 100644 --- a/api/backup/v1/backup.pb.go +++ b/api/backup/v1/backup.pb.go @@ -46,7 +46,9 @@ type StartBackupRequest struct { // DataModel represents the data model used for the backup. DataModel DataModel `protobuf:"varint,7,opt,name=data_model,json=dataModel,proto3,enum=backup.v1.DataModel" json:"data_model,omitempty"` // Folder on storage for artifact. - Folder string `protobuf:"bytes,8,opt,name=folder,proto3" json:"folder,omitempty"` + Folder string `protobuf:"bytes,8,opt,name=folder,proto3" json:"folder,omitempty"` + // Compression + Compression BackupCompression `protobuf:"varint,9,opt,name=compression,proto3,enum=backup.v1.BackupCompression" json:"compression,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -137,6 +139,13 @@ func (x *StartBackupRequest) GetFolder() string { return "" } +func (x *StartBackupRequest) GetCompression() BackupCompression { + if x != nil { + return x.Compression + } + return BackupCompression_BACKUP_COMPRESSION_UNSPECIFIED +} + type StartBackupResponse struct { state protoimpl.MessageState `protogen:"open.v1"` // Unique identifier. @@ -319,7 +328,9 @@ type ScheduledBackup struct { // Next run. NextRun *timestamppb.Timestamp `protobuf:"bytes,18,opt,name=next_run,json=nextRun,proto3" json:"next_run,omitempty"` // How many artifacts keep. 0 - unlimited. 
- Retention uint32 `protobuf:"varint,19,opt,name=retention,proto3" json:"retention,omitempty"` + Retention uint32 `protobuf:"varint,19,opt,name=retention,proto3" json:"retention,omitempty"` + // Compression + Compression BackupCompression `protobuf:"varint,20,opt,name=compression,proto3,enum=backup.v1.BackupCompression" json:"compression,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -487,6 +498,13 @@ func (x *ScheduledBackup) GetRetention() uint32 { return 0 } +func (x *ScheduledBackup) GetCompression() BackupCompression { + if x != nil { + return x.Compression + } + return BackupCompression_BACKUP_COMPRESSION_UNSPECIFIED +} + type ScheduleBackupRequest struct { state protoimpl.MessageState `protogen:"open.v1"` // Service identifier where backup should be performed. @@ -514,7 +532,9 @@ type ScheduleBackupRequest struct { // Backup data model (physical or logical). DataModel DataModel `protobuf:"varint,12,opt,name=data_model,json=dataModel,proto3,enum=backup.v1.DataModel" json:"data_model,omitempty"` // How many artifacts keep. 0 - unlimited. - Retention uint32 `protobuf:"varint,13,opt,name=retention,proto3" json:"retention,omitempty"` + Retention uint32 `protobuf:"varint,13,opt,name=retention,proto3" json:"retention,omitempty"` + // Compression + Compression BackupCompression `protobuf:"varint,14,opt,name=compression,proto3,enum=backup.v1.BackupCompression" json:"compression,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -640,6 +660,13 @@ func (x *ScheduleBackupRequest) GetRetention() uint32 { return 0 } +func (x *ScheduleBackupRequest) GetCompression() BackupCompression { + if x != nil { + return x.Compression + } + return BackupCompression_BACKUP_COMPRESSION_UNSPECIFIED +} + type ScheduleBackupResponse struct { state protoimpl.MessageState `protogen:"open.v1"` ScheduledBackupId string `protobuf:"bytes,1,opt,name=scheduled_backup_id,json=scheduledBackupId,proto3" json:"scheduled_backup_id,omitempty"` @@ -1107,11 +1134,101 @@ func (x *GetLogsResponse) GetEnd() bool { return false } +type ListServiceCompressionRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Service identifier. + ServiceId string `protobuf:"bytes,1,opt,name=service_id,json=serviceId,proto3" json:"service_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListServiceCompressionRequest) Reset() { + *x = ListServiceCompressionRequest{} + mi := &file_backup_v1_backup_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListServiceCompressionRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServiceCompressionRequest) ProtoMessage() {} + +func (x *ListServiceCompressionRequest) ProtoReflect() protoreflect.Message { + mi := &file_backup_v1_backup_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServiceCompressionRequest.ProtoReflect.Descriptor instead. 
+func (*ListServiceCompressionRequest) Descriptor() ([]byte, []int) { + return file_backup_v1_backup_proto_rawDescGZIP(), []int{15} +} + +func (x *ListServiceCompressionRequest) GetServiceId() string { + if x != nil { + return x.ServiceId + } + return "" +} + +type ListServiceCompressionResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Available compression methods for the service. + CompressionMethods []BackupCompression `protobuf:"varint,1,rep,packed,name=compression_methods,json=compressionMethods,proto3,enum=backup.v1.BackupCompression" json:"compression_methods,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListServiceCompressionResponse) Reset() { + *x = ListServiceCompressionResponse{} + mi := &file_backup_v1_backup_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListServiceCompressionResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListServiceCompressionResponse) ProtoMessage() {} + +func (x *ListServiceCompressionResponse) ProtoReflect() protoreflect.Message { + mi := &file_backup_v1_backup_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListServiceCompressionResponse.ProtoReflect.Descriptor instead. +func (*ListServiceCompressionResponse) Descriptor() ([]byte, []int) { + return file_backup_v1_backup_proto_rawDescGZIP(), []int{16} +} + +func (x *ListServiceCompressionResponse) GetCompressionMethods() []BackupCompression { + if x != nil { + return x.CompressionMethods + } + return nil +} + var File_backup_v1_backup_proto protoreflect.FileDescriptor const file_backup_v1_backup_proto_rawDesc = "" + "\n" + - "\x16backup/v1/backup.proto\x12\tbackup.v1\x1a\x19backup/v1/artifacts.proto\x1a\x16backup/v1/common.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1binventory/v1/services.proto\x1a.protoc-gen-openapiv2/options/annotations.proto\x1a\x17validate/validate.proto\"\xc5\x02\n" + + "\x16backup/v1/backup.proto\x12\tbackup.v1\x1a\x19backup/v1/artifacts.proto\x1a\x16backup/v1/common.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x1egoogle/protobuf/duration.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a\x1binventory/v1/services.proto\x1a.protoc-gen-openapiv2/options/annotations.proto\x1a\x17validate/validate.proto\"\x85\x03\n" + "\x12StartBackupRequest\x12&\n" + "\n" + "service_id\x18\x01 \x01(\tB\a\xfaB\x04r\x02\x10\x01R\tserviceId\x12(\n" + @@ -1123,7 +1240,8 @@ const file_backup_v1_backup_proto_rawDesc = "" + "\aretries\x18\x06 \x01(\rR\aretries\x123\n" + "\n" + "data_model\x18\a \x01(\x0e2\x14.backup.v1.DataModelR\tdataModel\x12\x16\n" + - "\x06folder\x18\b \x01(\tR\x06folder\"6\n" + + "\x06folder\x18\b \x01(\tR\x06folder\x12>\n" + + "\vcompression\x18\t \x01(\x0e2\x1c.backup.v1.BackupCompressionR\vcompression\"6\n" + "\x13StartBackupResponse\x12\x1f\n" + "\vartifact_id\x18\x01 \x01(\tR\n" + "artifactId\"Q\n" + @@ -1132,7 +1250,7 @@ const file_backup_v1_backup_proto_rawDesc = "" + "artifactId\"\x92\x01\n" + "&ListArtifactCompatibleServicesResponse\x120\n" + "\x05mysql\x18\x01 \x03(\v2\x1a.inventory.v1.MySQLServiceR\x05mysql\x126\n" + - "\amongodb\x18\x02 \x03(\v2\x1c.inventory.v1.MongoDBServiceR\amongodb\"\xf5\x05\n" + + "\amongodb\x18\x02 
\x03(\v2\x1c.inventory.v1.MongoDBServiceR\amongodb\"\xb5\x06\n" + "\x0fScheduledBackup\x12.\n" + "\x13scheduled_backup_id\x18\x01 \x01(\tR\x11scheduledBackupId\x12\x1d\n" + "\n" + @@ -1157,7 +1275,8 @@ const file_backup_v1_backup_proto_rawDesc = "" + "\x06vendor\x18\x10 \x01(\tR\x06vendor\x125\n" + "\blast_run\x18\x11 \x01(\v2\x1a.google.protobuf.TimestampR\alastRun\x125\n" + "\bnext_run\x18\x12 \x01(\v2\x1a.google.protobuf.TimestampR\anextRun\x12\x1c\n" + - "\tretention\x18\x13 \x01(\rR\tretention\"\x98\x04\n" + + "\tretention\x18\x13 \x01(\rR\tretention\x12>\n" + + "\vcompression\x18\x14 \x01(\x0e2\x1c.backup.v1.BackupCompressionR\vcompression\"\xd8\x04\n" + "\x15ScheduleBackupRequest\x12&\n" + "\n" + "service_id\x18\x01 \x01(\tB\a\xfaB\x04r\x02\x10\x01R\tserviceId\x12(\n" + @@ -1176,7 +1295,8 @@ const file_backup_v1_backup_proto_rawDesc = "" + "\x04mode\x18\v \x01(\x0e2\x15.backup.v1.BackupModeR\x04mode\x123\n" + "\n" + "data_model\x18\f \x01(\x0e2\x14.backup.v1.DataModelR\tdataModel\x12\x1c\n" + - "\tretention\x18\r \x01(\rR\tretention\"H\n" + + "\tretention\x18\r \x01(\rR\tretention\x12>\n" + + "\vcompression\x18\x0e \x01(\x0e2\x1c.backup.v1.BackupCompressionR\vcompression\"H\n" + "\x16ScheduleBackupResponse\x12.\n" + "\x13scheduled_backup_id\x18\x01 \x01(\tR\x11scheduledBackupId\"\x1d\n" + "\x1bListScheduledBackupsRequest\"g\n" + @@ -1213,7 +1333,12 @@ const file_backup_v1_backup_proto_rawDesc = "" + "\x05limit\x18\x03 \x01(\rR\x05limit\"L\n" + "\x0fGetLogsResponse\x12'\n" + "\x04logs\x18\x01 \x03(\v2\x13.backup.v1.LogChunkR\x04logs\x12\x10\n" + - "\x03end\x18\x02 \x01(\bR\x03end2\xc5\x12\n" + + "\x03end\x18\x02 \x01(\bR\x03end\"G\n" + + "\x1dListServiceCompressionRequest\x12&\n" + + "\n" + + "service_id\x18\x01 \x01(\tB\a\xfaB\x04r\x02\x10\x01R\tserviceId\"o\n" + + "\x1eListServiceCompressionResponse\x12M\n" + + "\x13compression_methods\x18\x01 \x03(\x0e2\x1c.backup.v1.BackupCompressionR\x12compressionMethods2\xc9\x14\n" + "\rBackupService\x12\xe7\x03\n" + "\vStartBackup\x12\x1d.backup.v1.StartBackupRequest\x1a\x1e.backup.v1.StartBackupResponse\"\x98\x03\x92A\xf8\x02\x12\x0eStart a Backup\x1a\xe5\x02Could return the Error message in the details containing specific ErrorCode indicating failure reason:\n" + "ERROR_CODE_XTRABACKUP_NOT_INSTALLED - xtrabackup is not installed on the service\n" + @@ -1227,7 +1352,8 @@ const file_backup_v1_backup_proto_rawDesc = "" + "\aGetLogs\x12\x19.backup.v1.GetLogsRequest\x1a\x1a.backup.v1.GetLogsResponse\"q\x92AH\x12\bGet Logs\x1a google.protobuf.Duration - 16, // 1: backup.v1.StartBackupRequest.data_model:type_name -> backup.v1.DataModel - 17, // 2: backup.v1.ListArtifactCompatibleServicesResponse.mysql:type_name -> inventory.v1.MySQLService - 18, // 3: backup.v1.ListArtifactCompatibleServicesResponse.mongodb:type_name -> inventory.v1.MongoDBService - 19, // 4: backup.v1.ScheduledBackup.start_time:type_name -> google.protobuf.Timestamp - 15, // 5: backup.v1.ScheduledBackup.retry_interval:type_name -> google.protobuf.Duration - 16, // 6: backup.v1.ScheduledBackup.data_model:type_name -> backup.v1.DataModel - 20, // 7: backup.v1.ScheduledBackup.mode:type_name -> backup.v1.BackupMode - 19, // 8: backup.v1.ScheduledBackup.last_run:type_name -> google.protobuf.Timestamp - 19, // 9: backup.v1.ScheduledBackup.next_run:type_name -> google.protobuf.Timestamp - 19, // 10: backup.v1.ScheduleBackupRequest.start_time:type_name -> google.protobuf.Timestamp - 15, // 11: backup.v1.ScheduleBackupRequest.retry_interval:type_name -> google.protobuf.Duration - 20, // 
12: backup.v1.ScheduleBackupRequest.mode:type_name -> backup.v1.BackupMode - 16, // 13: backup.v1.ScheduleBackupRequest.data_model:type_name -> backup.v1.DataModel - 4, // 14: backup.v1.ListScheduledBackupsResponse.scheduled_backups:type_name -> backup.v1.ScheduledBackup - 19, // 15: backup.v1.ChangeScheduledBackupRequest.start_time:type_name -> google.protobuf.Timestamp - 15, // 16: backup.v1.ChangeScheduledBackupRequest.retry_interval:type_name -> google.protobuf.Duration - 21, // 17: backup.v1.GetLogsResponse.logs:type_name -> backup.v1.LogChunk - 0, // 18: backup.v1.BackupService.StartBackup:input_type -> backup.v1.StartBackupRequest - 2, // 19: backup.v1.BackupService.ListArtifactCompatibleServices:input_type -> backup.v1.ListArtifactCompatibleServicesRequest - 5, // 20: backup.v1.BackupService.ScheduleBackup:input_type -> backup.v1.ScheduleBackupRequest - 7, // 21: backup.v1.BackupService.ListScheduledBackups:input_type -> backup.v1.ListScheduledBackupsRequest - 9, // 22: backup.v1.BackupService.ChangeScheduledBackup:input_type -> backup.v1.ChangeScheduledBackupRequest - 11, // 23: backup.v1.BackupService.RemoveScheduledBackup:input_type -> backup.v1.RemoveScheduledBackupRequest - 13, // 24: backup.v1.BackupService.GetLogs:input_type -> backup.v1.GetLogsRequest - 22, // 25: backup.v1.BackupService.ListArtifacts:input_type -> backup.v1.ListArtifactsRequest - 23, // 26: backup.v1.BackupService.DeleteArtifact:input_type -> backup.v1.DeleteArtifactRequest - 24, // 27: backup.v1.BackupService.ListPitrTimeranges:input_type -> backup.v1.ListPitrTimerangesRequest - 1, // 28: backup.v1.BackupService.StartBackup:output_type -> backup.v1.StartBackupResponse - 3, // 29: backup.v1.BackupService.ListArtifactCompatibleServices:output_type -> backup.v1.ListArtifactCompatibleServicesResponse - 6, // 30: backup.v1.BackupService.ScheduleBackup:output_type -> backup.v1.ScheduleBackupResponse - 8, // 31: backup.v1.BackupService.ListScheduledBackups:output_type -> backup.v1.ListScheduledBackupsResponse - 10, // 32: backup.v1.BackupService.ChangeScheduledBackup:output_type -> backup.v1.ChangeScheduledBackupResponse - 12, // 33: backup.v1.BackupService.RemoveScheduledBackup:output_type -> backup.v1.RemoveScheduledBackupResponse - 14, // 34: backup.v1.BackupService.GetLogs:output_type -> backup.v1.GetLogsResponse - 25, // 35: backup.v1.BackupService.ListArtifacts:output_type -> backup.v1.ListArtifactsResponse - 26, // 36: backup.v1.BackupService.DeleteArtifact:output_type -> backup.v1.DeleteArtifactResponse - 27, // 37: backup.v1.BackupService.ListPitrTimeranges:output_type -> backup.v1.ListPitrTimerangesResponse - 28, // [28:38] is the sub-list for method output_type - 18, // [18:28] is the sub-list for method input_type - 18, // [18:18] is the sub-list for extension type_name - 18, // [18:18] is the sub-list for extension extendee - 0, // [0:18] is the sub-list for field type_name + 17, // 0: backup.v1.StartBackupRequest.retry_interval:type_name -> google.protobuf.Duration + 18, // 1: backup.v1.StartBackupRequest.data_model:type_name -> backup.v1.DataModel + 19, // 2: backup.v1.StartBackupRequest.compression:type_name -> backup.v1.BackupCompression + 20, // 3: backup.v1.ListArtifactCompatibleServicesResponse.mysql:type_name -> inventory.v1.MySQLService + 21, // 4: backup.v1.ListArtifactCompatibleServicesResponse.mongodb:type_name -> inventory.v1.MongoDBService + 22, // 5: backup.v1.ScheduledBackup.start_time:type_name -> google.protobuf.Timestamp + 17, // 6: 
backup.v1.ScheduledBackup.retry_interval:type_name -> google.protobuf.Duration + 18, // 7: backup.v1.ScheduledBackup.data_model:type_name -> backup.v1.DataModel + 23, // 8: backup.v1.ScheduledBackup.mode:type_name -> backup.v1.BackupMode + 22, // 9: backup.v1.ScheduledBackup.last_run:type_name -> google.protobuf.Timestamp + 22, // 10: backup.v1.ScheduledBackup.next_run:type_name -> google.protobuf.Timestamp + 19, // 11: backup.v1.ScheduledBackup.compression:type_name -> backup.v1.BackupCompression + 22, // 12: backup.v1.ScheduleBackupRequest.start_time:type_name -> google.protobuf.Timestamp + 17, // 13: backup.v1.ScheduleBackupRequest.retry_interval:type_name -> google.protobuf.Duration + 23, // 14: backup.v1.ScheduleBackupRequest.mode:type_name -> backup.v1.BackupMode + 18, // 15: backup.v1.ScheduleBackupRequest.data_model:type_name -> backup.v1.DataModel + 19, // 16: backup.v1.ScheduleBackupRequest.compression:type_name -> backup.v1.BackupCompression + 4, // 17: backup.v1.ListScheduledBackupsResponse.scheduled_backups:type_name -> backup.v1.ScheduledBackup + 22, // 18: backup.v1.ChangeScheduledBackupRequest.start_time:type_name -> google.protobuf.Timestamp + 17, // 19: backup.v1.ChangeScheduledBackupRequest.retry_interval:type_name -> google.protobuf.Duration + 24, // 20: backup.v1.GetLogsResponse.logs:type_name -> backup.v1.LogChunk + 19, // 21: backup.v1.ListServiceCompressionResponse.compression_methods:type_name -> backup.v1.BackupCompression + 0, // 22: backup.v1.BackupService.StartBackup:input_type -> backup.v1.StartBackupRequest + 2, // 23: backup.v1.BackupService.ListArtifactCompatibleServices:input_type -> backup.v1.ListArtifactCompatibleServicesRequest + 5, // 24: backup.v1.BackupService.ScheduleBackup:input_type -> backup.v1.ScheduleBackupRequest + 7, // 25: backup.v1.BackupService.ListScheduledBackups:input_type -> backup.v1.ListScheduledBackupsRequest + 9, // 26: backup.v1.BackupService.ChangeScheduledBackup:input_type -> backup.v1.ChangeScheduledBackupRequest + 11, // 27: backup.v1.BackupService.RemoveScheduledBackup:input_type -> backup.v1.RemoveScheduledBackupRequest + 13, // 28: backup.v1.BackupService.GetLogs:input_type -> backup.v1.GetLogsRequest + 25, // 29: backup.v1.BackupService.ListArtifacts:input_type -> backup.v1.ListArtifactsRequest + 26, // 30: backup.v1.BackupService.DeleteArtifact:input_type -> backup.v1.DeleteArtifactRequest + 27, // 31: backup.v1.BackupService.ListPitrTimeranges:input_type -> backup.v1.ListPitrTimerangesRequest + 15, // 32: backup.v1.BackupService.ListServiceCompression:input_type -> backup.v1.ListServiceCompressionRequest + 1, // 33: backup.v1.BackupService.StartBackup:output_type -> backup.v1.StartBackupResponse + 3, // 34: backup.v1.BackupService.ListArtifactCompatibleServices:output_type -> backup.v1.ListArtifactCompatibleServicesResponse + 6, // 35: backup.v1.BackupService.ScheduleBackup:output_type -> backup.v1.ScheduleBackupResponse + 8, // 36: backup.v1.BackupService.ListScheduledBackups:output_type -> backup.v1.ListScheduledBackupsResponse + 10, // 37: backup.v1.BackupService.ChangeScheduledBackup:output_type -> backup.v1.ChangeScheduledBackupResponse + 12, // 38: backup.v1.BackupService.RemoveScheduledBackup:output_type -> backup.v1.RemoveScheduledBackupResponse + 14, // 39: backup.v1.BackupService.GetLogs:output_type -> backup.v1.GetLogsResponse + 28, // 40: backup.v1.BackupService.ListArtifacts:output_type -> backup.v1.ListArtifactsResponse + 29, // 41: backup.v1.BackupService.DeleteArtifact:output_type -> 
backup.v1.DeleteArtifactResponse + 30, // 42: backup.v1.BackupService.ListPitrTimeranges:output_type -> backup.v1.ListPitrTimerangesResponse + 16, // 43: backup.v1.BackupService.ListServiceCompression:output_type -> backup.v1.ListServiceCompressionResponse + 33, // [33:44] is the sub-list for method output_type + 22, // [22:33] is the sub-list for method input_type + 22, // [22:22] is the sub-list for extension type_name + 22, // [22:22] is the sub-list for extension extendee + 0, // [0:22] is the sub-list for field type_name } func init() { file_backup_v1_backup_proto_init() } @@ -1337,7 +1472,7 @@ func file_backup_v1_backup_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_backup_v1_backup_proto_rawDesc), len(file_backup_v1_backup_proto_rawDesc)), NumEnums: 0, - NumMessages: 15, + NumMessages: 17, NumExtensions: 0, NumServices: 1, }, diff --git a/api/backup/v1/backup.pb.gw.go b/api/backup/v1/backup.pb.gw.go index f7daee52d8c..02dd4b0359f 100644 --- a/api/backup/v1/backup.pb.gw.go +++ b/api/backup/v1/backup.pb.gw.go @@ -381,6 +381,42 @@ func local_request_BackupService_ListPitrTimeranges_0(ctx context.Context, marsh return msg, metadata, err } +func request_BackupService_ListServiceCompression_0(ctx context.Context, marshaler runtime.Marshaler, client BackupServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq ListServiceCompressionRequest + metadata runtime.ServerMetadata + err error + ) + val, ok := pathParams["service_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "service_id") + } + protoReq.ServiceId, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "service_id", err) + } + msg, err := client.ListServiceCompression(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err +} + +func local_request_BackupService_ListServiceCompression_0(ctx context.Context, marshaler runtime.Marshaler, server BackupServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var ( + protoReq ListServiceCompressionRequest + metadata runtime.ServerMetadata + err error + ) + val, ok := pathParams["service_id"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "service_id") + } + protoReq.ServiceId, err = runtime.String(val) + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "service_id", err) + } + msg, err := server.ListServiceCompression(ctx, &protoReq) + return msg, metadata, err +} + // RegisterBackupServiceHandlerServer registers the http handlers for service BackupService to "mux". // UnaryRPC :call BackupServiceServer directly. // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. @@ -587,6 +623,26 @@ func RegisterBackupServiceHandlerServer(ctx context.Context, mux *runtime.ServeM } forward_BackupService_ListPitrTimeranges_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
diff --git a/api/backup/v1/backup.pb.gw.go b/api/backup/v1/backup.pb.gw.go
index f7daee52d8c..02dd4b0359f 100644
--- a/api/backup/v1/backup.pb.gw.go
+++ b/api/backup/v1/backup.pb.gw.go
@@ -381,6 +381,42 @@ func local_request_BackupService_ListPitrTimeranges_0(ctx context.Context, marsh
 	return msg, metadata, err
 }

+func request_BackupService_ListServiceCompression_0(ctx context.Context, marshaler runtime.Marshaler, client BackupServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var (
+		protoReq ListServiceCompressionRequest
+		metadata runtime.ServerMetadata
+		err      error
+	)
+	val, ok := pathParams["service_id"]
+	if !ok {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "service_id")
+	}
+	protoReq.ServiceId, err = runtime.String(val)
+	if err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "service_id", err)
+	}
+	msg, err := client.ListServiceCompression(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
+	return msg, metadata, err
+}
+
+func local_request_BackupService_ListServiceCompression_0(ctx context.Context, marshaler runtime.Marshaler, server BackupServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
+	var (
+		protoReq ListServiceCompressionRequest
+		metadata runtime.ServerMetadata
+		err      error
+	)
+	val, ok := pathParams["service_id"]
+	if !ok {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "service_id")
+	}
+	protoReq.ServiceId, err = runtime.String(val)
+	if err != nil {
+		return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "service_id", err)
+	}
+	msg, err := server.ListServiceCompression(ctx, &protoReq)
+	return msg, metadata, err
+}
+
 // RegisterBackupServiceHandlerServer registers the http handlers for service BackupService to "mux".
 // UnaryRPC :call BackupServiceServer directly.
 // StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
@@ -587,6 +623,26 @@ func RegisterBackupServiceHandlerServer(ctx context.Context, mux *runtime.ServeM
 		}
 		forward_BackupService_ListPitrTimeranges_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
 	})
+	mux.Handle(http.MethodGet, pattern_BackupService_ListServiceCompression_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		var stream runtime.ServerTransportStream
+		ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		annotatedContext, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/backup.v1.BackupService/ListServiceCompression", runtime.WithHTTPPathPattern("/v1/backups/services/{service_id}/compression"))
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := local_request_BackupService_ListServiceCompression_0(annotatedContext, inboundMarshaler, server, req, pathParams)
+		md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+		if err != nil {
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		forward_BackupService_ListServiceCompression_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+	})
 	return nil
 }

@@ -797,6 +853,23 @@ func RegisterBackupServiceHandlerClient(ctx context.Context, mux *runtime.ServeM
 		}
 		forward_BackupService_ListPitrTimeranges_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
 	})
+	mux.Handle(http.MethodGet, pattern_BackupService_ListServiceCompression_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
+		ctx, cancel := context.WithCancel(req.Context())
+		defer cancel()
+		inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
+		annotatedContext, err := runtime.AnnotateContext(ctx, mux, req, "/backup.v1.BackupService/ListServiceCompression", runtime.WithHTTPPathPattern("/v1/backups/services/{service_id}/compression"))
+		if err != nil {
+			runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		resp, md, err := request_BackupService_ListServiceCompression_0(annotatedContext, inboundMarshaler, client, req, pathParams)
+		annotatedContext = runtime.NewServerMetadataContext(annotatedContext, md)
+		if err != nil {
+			runtime.HTTPError(annotatedContext, mux, outboundMarshaler, w, req, err)
+			return
+		}
+		forward_BackupService_ListServiceCompression_0(annotatedContext, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
+	})
 	return nil
 }

@@ -811,6 +884,7 @@ var (
 	pattern_BackupService_ListArtifacts_0          = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v1", "backups", "artifacts"}, ""))
 	pattern_BackupService_DeleteArtifact_0         = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3}, []string{"v1", "backups", "artifacts", "artifact_id"}, ""))
 	pattern_BackupService_ListPitrTimeranges_0     = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"v1", "backups", "artifacts", "artifact_id", "pitr-timeranges"}, ""))
+	pattern_BackupService_ListServiceCompression_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"v1", "backups", "services", "service_id", "compression"}, ""))
 )

 var (
@@ -824,4 +898,5 @@ var (
 	forward_BackupService_ListArtifacts_0          = runtime.ForwardResponseMessage
 	forward_BackupService_DeleteArtifact_0         = runtime.ForwardResponseMessage
 	forward_BackupService_ListPitrTimeranges_0     = runtime.ForwardResponseMessage
+	forward_BackupService_ListServiceCompression_0 = runtime.ForwardResponseMessage
 )
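The generated gateway only exposes the new route once it is mounted on an HTTP server. A minimal wiring sketch in Go, assuming the grpc-gateway v2 runtime import path, an import of the generated package as backupv1, and a srv value implementing backupv1.BackupServiceServer (the listen address is a placeholder):

package example

import (
	"context"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime" // assumed import path

	backupv1 "github.com/percona/pmm/api/backup/v1"
)

// serve mounts the generated REST handlers, including
// GET /v1/backups/services/{service_id}/compression, directly on srv,
// bypassing a gRPC connection (the "HandlerServer" flavor above).
func serve(srv backupv1.BackupServiceServer) error {
	mux := runtime.NewServeMux()
	if err := backupv1.RegisterBackupServiceHandlerServer(context.Background(), mux, srv); err != nil {
		return err
	}
	return http.ListenAndServe(":8080", mux) // placeholder address
}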
diff --git a/api/backup/v1/backup.pb.validate.go b/api/backup/v1/backup.pb.validate.go
index 743bd45f688..bd03b54dfb3 100644
--- a/api/backup/v1/backup.pb.validate.go
+++ b/api/backup/v1/backup.pb.validate.go
@@ -118,6 +118,8 @@ func (m *StartBackupRequest) validate(all bool) error {
 	// no validation rules for Folder

+	// no validation rules for Compression
+
 	if len(errors) > 0 {
 		return StartBackupRequestMultiError(errors)
 	}
@@ -760,6 +762,8 @@ func (m *ScheduledBackup) validate(all bool) error {
 	// no validation rules for Retention

+	// no validation rules for Compression
+
 	if len(errors) > 0 {
 		return ScheduledBackupMultiError(errors)
 	}
@@ -967,6 +971,8 @@ func (m *ScheduleBackupRequest) validate(all bool) error {
 	// no validation rules for Retention

+	// no validation rules for Compression
+
 	if len(errors) > 0 {
 		return ScheduleBackupRequestMultiError(errors)
 	}
@@ -2157,3 +2163,220 @@ var _ interface {
 	Cause() error
 	ErrorName() string
 } = GetLogsResponseValidationError{}
+
+// Validate checks the field values on ListServiceCompressionRequest with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *ListServiceCompressionRequest) Validate() error {
+	return m.validate(false)
+}
+
+// ValidateAll checks the field values on ListServiceCompressionRequest with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// ListServiceCompressionRequestMultiError, or nil if none found.
+func (m *ListServiceCompressionRequest) ValidateAll() error {
+	return m.validate(true)
+}
+
+func (m *ListServiceCompressionRequest) validate(all bool) error {
+	if m == nil {
+		return nil
+	}
+
+	var errors []error
+
+	if utf8.RuneCountInString(m.GetServiceId()) < 1 {
+		err := ListServiceCompressionRequestValidationError{
+			field:  "ServiceId",
+			reason: "value length must be at least 1 runes",
+		}
+		if !all {
+			return err
+		}
+		errors = append(errors, err)
+	}
+
+	if len(errors) > 0 {
+		return ListServiceCompressionRequestMultiError(errors)
+	}
+
+	return nil
+}
+
+// ListServiceCompressionRequestMultiError is an error wrapping multiple
+// validation errors returned by ListServiceCompressionRequest.ValidateAll()
+// if the designated constraints aren't met.
+type ListServiceCompressionRequestMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ListServiceCompressionRequestMultiError) Error() string {
+	msgs := make([]string, 0, len(m))
+	for _, err := range m {
+		msgs = append(msgs, err.Error())
+	}
+	return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ListServiceCompressionRequestMultiError) AllErrors() []error { return m }
+
+// ListServiceCompressionRequestValidationError is the validation error
+// returned by ListServiceCompressionRequest.Validate if the designated
+// constraints aren't met.
+type ListServiceCompressionRequestValidationError struct {
+	field  string
+	reason string
+	cause  error
+	key    bool
+}
+
+// Field function returns field value.
+func (e ListServiceCompressionRequestValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ListServiceCompressionRequestValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ListServiceCompressionRequestValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ListServiceCompressionRequestValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ListServiceCompressionRequestValidationError) ErrorName() string {
+	return "ListServiceCompressionRequestValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ListServiceCompressionRequestValidationError) Error() string {
+	cause := ""
+	if e.cause != nil {
+		cause = fmt.Sprintf(" | caused by: %v", e.cause)
+	}
+
+	key := ""
+	if e.key {
+		key = "key for "
+	}
+
+	return fmt.Sprintf(
+		"invalid %sListServiceCompressionRequest.%s: %s%s",
+		key,
+		e.field,
+		e.reason,
+		cause)
+}
+
+var _ error = ListServiceCompressionRequestValidationError{}
+
+var _ interface {
+	Field() string
+	Reason() string
+	Key() bool
+	Cause() error
+	ErrorName() string
+} = ListServiceCompressionRequestValidationError{}
+
+// Validate checks the field values on ListServiceCompressionResponse with the
+// rules defined in the proto definition for this message. If any rules are
+// violated, the first error encountered is returned, or nil if there are no violations.
+func (m *ListServiceCompressionResponse) Validate() error {
+	return m.validate(false)
+}
+
+// ValidateAll checks the field values on ListServiceCompressionResponse with
+// the rules defined in the proto definition for this message. If any rules
+// are violated, the result is a list of violation errors wrapped in
+// ListServiceCompressionResponseMultiError, or nil if none found.
+func (m *ListServiceCompressionResponse) ValidateAll() error {
+	return m.validate(true)
+}
+
+func (m *ListServiceCompressionResponse) validate(all bool) error {
+	if m == nil {
+		return nil
+	}
+
+	var errors []error
+
+	if len(errors) > 0 {
+		return ListServiceCompressionResponseMultiError(errors)
+	}
+
+	return nil
+}
+
+// ListServiceCompressionResponseMultiError is an error wrapping multiple
+// validation errors returned by ListServiceCompressionResponse.ValidateAll()
+// if the designated constraints aren't met.
+type ListServiceCompressionResponseMultiError []error
+
+// Error returns a concatenation of all the error messages it wraps.
+func (m ListServiceCompressionResponseMultiError) Error() string {
+	msgs := make([]string, 0, len(m))
+	for _, err := range m {
+		msgs = append(msgs, err.Error())
+	}
+	return strings.Join(msgs, "; ")
+}
+
+// AllErrors returns a list of validation violation errors.
+func (m ListServiceCompressionResponseMultiError) AllErrors() []error { return m }
+
+// ListServiceCompressionResponseValidationError is the validation error
+// returned by ListServiceCompressionResponse.Validate if the designated
+// constraints aren't met.
+type ListServiceCompressionResponseValidationError struct {
+	field  string
+	reason string
+	cause  error
+	key    bool
+}
+
+// Field function returns field value.
+func (e ListServiceCompressionResponseValidationError) Field() string { return e.field }
+
+// Reason function returns reason value.
+func (e ListServiceCompressionResponseValidationError) Reason() string { return e.reason }
+
+// Cause function returns cause value.
+func (e ListServiceCompressionResponseValidationError) Cause() error { return e.cause }
+
+// Key function returns key value.
+func (e ListServiceCompressionResponseValidationError) Key() bool { return e.key }
+
+// ErrorName returns error name.
+func (e ListServiceCompressionResponseValidationError) ErrorName() string {
+	return "ListServiceCompressionResponseValidationError"
+}
+
+// Error satisfies the builtin error interface
+func (e ListServiceCompressionResponseValidationError) Error() string {
+	cause := ""
+	if e.cause != nil {
+		cause = fmt.Sprintf(" | caused by: %v", e.cause)
+	}
+
+	key := ""
+	if e.key {
+		key = "key for "
+	}
+
+	return fmt.Sprintf(
+		"invalid %sListServiceCompressionResponse.%s: %s%s",
+		key,
+		e.field,
+		e.reason,
+		cause)
+}
+
+var _ error = ListServiceCompressionResponseValidationError{}
+
+var _ interface {
+	Field() string
+	Reason() string
+	Key() bool
+	Cause() error
+	ErrorName() string
+} = ListServiceCompressionResponseValidationError{}
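The new validators follow the protoc-gen-validate pattern used throughout this file: Validate returns the first violation, ValidateAll collects them all into the MultiError type. A small sketch, assuming the generated package is imported as backupv1:

package main

import (
	"fmt"

	backupv1 "github.com/percona/pmm/api/backup/v1"
)

func main() {
	// An empty service_id violates the min_len = 1 rule defined above.
	req := &backupv1.ListServiceCompressionRequest{ServiceId: ""}
	if err := req.ValidateAll(); err != nil {
		// Prints the ListServiceCompressionRequestMultiError wrapping the
		// single ServiceId violation.
		fmt.Println(err)
	}
}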
diff --git a/api/backup/v1/backup.proto b/api/backup/v1/backup.proto
index a775f7dceae..43be7c46be6 100644
--- a/api/backup/v1/backup.proto
+++ b/api/backup/v1/backup.proto
@@ -28,6 +28,8 @@ message StartBackupRequest {
   DataModel data_model = 7;
   // Folder on storage for artifact.
   string folder = 8;
+  // Compression
+  BackupCompression compression = 9;
 }

 message StartBackupResponse {
@@ -85,6 +87,8 @@ message ScheduledBackup {
   google.protobuf.Timestamp next_run = 18;
   // How many artifacts keep. 0 - unlimited.
   uint32 retention = 19;
+  // Compression
+  BackupCompression compression = 20;
 }

 message ScheduleBackupRequest {
@@ -114,6 +118,8 @@ message ScheduleBackupRequest {
   DataModel data_model = 12;
   // How many artifacts keep. 0 - unlimited.
   uint32 retention = 13;
+  // Compression
+  BackupCompression compression = 14;
 }

 message ScheduleBackupResponse {
@@ -164,6 +170,16 @@ message GetLogsResponse {
   bool end = 2;
 }

+message ListServiceCompressionRequest {
+  // Service identifier.
+  string service_id = 1 [(validate.rules).string.min_len = 1];
+}
+
+message ListServiceCompressionResponse {
+  // Available compression methods for the service.
+  repeated BackupCompression compression_methods = 1;
+}
+
 // Backups service handles backup operations to DB.
 service BackupService {
   // StartBackup request backup specified service to location.
@@ -256,4 +272,13 @@ service BackupService {
       description: "Return a list of available MongoDB point-in-time-recovery timeranges."
     };
   }
+
+  // ListServiceCompression returns available compression methods for a service.
+  rpc ListServiceCompression(ListServiceCompressionRequest) returns (ListServiceCompressionResponse) {
+    option (google.api.http) = {get: "/v1/backups/services/{service_id}/compression"};
+    option (grpc.gateway.protoc_gen_openapiv2.options.openapiv2_operation) = {
+      summary: "List Service Compression"
+      description: "Return a list of available compression methods for a service."
+    };
+  }
 }
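The HTTP annotation above maps the RPC onto a plain GET route, so the endpoint can be exercised without any generated client. A hand-rolled sketch, assuming a server on localhost:8080, no authentication, and a placeholder service ID:

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	serviceID := "example-service-id" // hypothetical
	resp, err := http.Get("http://localhost:8080/v1/backups/services/" + serviceID + "/compression")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The body matches ListServiceCompressionResponse; enum values are
	// serialized as their names, e.g. "BACKUP_COMPRESSION_ZSTD".
	var body struct {
		CompressionMethods []string `json:"compression_methods"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
		panic(err)
	}
	fmt.Println(body.CompressionMethods)
}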
diff --git a/api/backup/v1/backup_grpc.pb.go b/api/backup/v1/backup_grpc.pb.go
index 42c2f585611..c6224b83392 100644
--- a/api/backup/v1/backup_grpc.pb.go
+++ b/api/backup/v1/backup_grpc.pb.go
@@ -30,6 +30,7 @@ const (
 	BackupService_ListArtifacts_FullMethodName          = "/backup.v1.BackupService/ListArtifacts"
 	BackupService_DeleteArtifact_FullMethodName         = "/backup.v1.BackupService/DeleteArtifact"
 	BackupService_ListPitrTimeranges_FullMethodName     = "/backup.v1.BackupService/ListPitrTimeranges"
+	BackupService_ListServiceCompression_FullMethodName = "/backup.v1.BackupService/ListServiceCompression"
 )

 // BackupServiceClient is the client API for BackupService service.
@@ -58,6 +59,8 @@ type BackupServiceClient interface {
 	DeleteArtifact(ctx context.Context, in *DeleteArtifactRequest, opts ...grpc.CallOption) (*DeleteArtifactResponse, error)
 	// ListPitrTimeranges list the available MongoDB PITR timeranges in a given backup location
 	ListPitrTimeranges(ctx context.Context, in *ListPitrTimerangesRequest, opts ...grpc.CallOption) (*ListPitrTimerangesResponse, error)
+	// ListServiceCompression returns available compression methods for a service.
+	ListServiceCompression(ctx context.Context, in *ListServiceCompressionRequest, opts ...grpc.CallOption) (*ListServiceCompressionResponse, error)
 }

 type backupServiceClient struct {
@@ -168,6 +171,16 @@ func (c *backupServiceClient) ListPitrTimeranges(ctx context.Context, in *ListPi
 	return out, nil
 }

+func (c *backupServiceClient) ListServiceCompression(ctx context.Context, in *ListServiceCompressionRequest, opts ...grpc.CallOption) (*ListServiceCompressionResponse, error) {
+	cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
+	out := new(ListServiceCompressionResponse)
+	err := c.cc.Invoke(ctx, BackupService_ListServiceCompression_FullMethodName, in, out, cOpts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
 // BackupServiceServer is the server API for BackupService service.
 // All implementations must embed UnimplementedBackupServiceServer
 // for forward compatibility.
@@ -194,6 +207,8 @@ type BackupServiceServer interface {
 	DeleteArtifact(context.Context, *DeleteArtifactRequest) (*DeleteArtifactResponse, error)
 	// ListPitrTimeranges list the available MongoDB PITR timeranges in a given backup location
 	ListPitrTimeranges(context.Context, *ListPitrTimerangesRequest) (*ListPitrTimerangesResponse, error)
+	// ListServiceCompression returns available compression methods for a service.
+	ListServiceCompression(context.Context, *ListServiceCompressionRequest) (*ListServiceCompressionResponse, error)
 	mustEmbedUnimplementedBackupServiceServer()
 }

@@ -243,6 +258,10 @@ func (UnimplementedBackupServiceServer) DeleteArtifact(context.Context, *DeleteA
 func (UnimplementedBackupServiceServer) ListPitrTimeranges(context.Context, *ListPitrTimerangesRequest) (*ListPitrTimerangesResponse, error) {
 	return nil, status.Errorf(codes.Unimplemented, "method ListPitrTimeranges not implemented")
 }
+
+func (UnimplementedBackupServiceServer) ListServiceCompression(context.Context, *ListServiceCompressionRequest) (*ListServiceCompressionResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ListServiceCompression not implemented")
+}
 func (UnimplementedBackupServiceServer) mustEmbedUnimplementedBackupServiceServer() {}
 func (UnimplementedBackupServiceServer) testEmbeddedByValue()                       {}

@@ -444,6 +463,24 @@ func _BackupService_ListPitrTimeranges_Handler(srv interface{}, ctx context.Cont
 	return interceptor(ctx, in, info, handler)
 }

+func _BackupService_ListServiceCompression_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ListServiceCompressionRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(BackupServiceServer).ListServiceCompression(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: BackupService_ListServiceCompression_FullMethodName,
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(BackupServiceServer).ListServiceCompression(ctx, req.(*ListServiceCompressionRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
 // BackupService_ServiceDesc is the grpc.ServiceDesc for BackupService service.
 // It's only intended for direct use with grpc.RegisterService,
 // and not to be introspected or modified (even as a copy)
@@ -491,6 +528,10 @@ var BackupService_ServiceDesc = grpc.ServiceDesc{
 			MethodName: "ListPitrTimeranges",
 			Handler:    _BackupService_ListPitrTimeranges_Handler,
 		},
+		{
+			MethodName: "ListServiceCompression",
+			Handler:    _BackupService_ListServiceCompression_Handler,
+		},
 	},
 	Streams:  []grpc.StreamDesc{},
 	Metadata: "backup/v1/backup.proto",
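On the server side, the only new obligation is one method; embedding UnimplementedBackupServiceServer keeps the rest of the interface satisfied. A minimal sketch, assuming the generated package is imported as backupv1 (the static method list is illustrative, not PMM's actual selection logic):

package example

import (
	"context"

	backupv1 "github.com/percona/pmm/api/backup/v1"
)

type backupServer struct {
	backupv1.UnimplementedBackupServiceServer
}

// ListServiceCompression returns a fixed set of methods for illustration;
// a real implementation would choose them based on the service behind
// req.ServiceId.
func (s *backupServer) ListServiceCompression(ctx context.Context, req *backupv1.ListServiceCompressionRequest) (*backupv1.ListServiceCompressionResponse, error) {
	return &backupv1.ListServiceCompressionResponse{
		CompressionMethods: []backupv1.BackupCompression{
			backupv1.BackupCompression_BACKUP_COMPRESSION_GZIP,
			backupv1.BackupCompression_BACKUP_COMPRESSION_ZSTD,
			backupv1.BackupCompression_BACKUP_COMPRESSION_NONE,
		},
	}, nil
}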
diff --git a/api/backup/v1/common.pb.go b/api/backup/v1/common.pb.go
index de41c31ac70..27e049c3013 100644
--- a/api/backup/v1/common.pb.go
+++ b/api/backup/v1/common.pb.go
@@ -127,6 +127,77 @@ func (BackupMode) EnumDescriptor() ([]byte, []int) {
 	return file_backup_v1_common_proto_rawDescGZIP(), []int{1}
 }

+// BackupCompression specifies compression
+type BackupCompression int32
+
+const (
+	BackupCompression_BACKUP_COMPRESSION_UNSPECIFIED BackupCompression = 0
+	BackupCompression_BACKUP_COMPRESSION_DEFAULT     BackupCompression = 1
+	BackupCompression_BACKUP_COMPRESSION_NONE        BackupCompression = 2
+	BackupCompression_BACKUP_COMPRESSION_QUICKLZ     BackupCompression = 3
+	BackupCompression_BACKUP_COMPRESSION_ZSTD        BackupCompression = 4
+	BackupCompression_BACKUP_COMPRESSION_LZ4         BackupCompression = 5
+	BackupCompression_BACKUP_COMPRESSION_S2          BackupCompression = 6
+	BackupCompression_BACKUP_COMPRESSION_GZIP        BackupCompression = 7
+	BackupCompression_BACKUP_COMPRESSION_SNAPPY      BackupCompression = 8
+	BackupCompression_BACKUP_COMPRESSION_PGZIP       BackupCompression = 9
+)
+
+// Enum value maps for BackupCompression.
+var (
+	BackupCompression_name = map[int32]string{
+		0: "BACKUP_COMPRESSION_UNSPECIFIED",
+		1: "BACKUP_COMPRESSION_DEFAULT",
+		2: "BACKUP_COMPRESSION_NONE",
+		3: "BACKUP_COMPRESSION_QUICKLZ",
+		4: "BACKUP_COMPRESSION_ZSTD",
+		5: "BACKUP_COMPRESSION_LZ4",
+		6: "BACKUP_COMPRESSION_S2",
+		7: "BACKUP_COMPRESSION_GZIP",
+		8: "BACKUP_COMPRESSION_SNAPPY",
+		9: "BACKUP_COMPRESSION_PGZIP",
+	}
+	BackupCompression_value = map[string]int32{
+		"BACKUP_COMPRESSION_UNSPECIFIED": 0,
+		"BACKUP_COMPRESSION_DEFAULT":     1,
+		"BACKUP_COMPRESSION_NONE":        2,
+		"BACKUP_COMPRESSION_QUICKLZ":     3,
+		"BACKUP_COMPRESSION_ZSTD":        4,
+		"BACKUP_COMPRESSION_LZ4":         5,
+		"BACKUP_COMPRESSION_S2":          6,
+		"BACKUP_COMPRESSION_GZIP":        7,
+		"BACKUP_COMPRESSION_SNAPPY":      8,
+		"BACKUP_COMPRESSION_PGZIP":       9,
+	}
+)
+
+func (x BackupCompression) Enum() *BackupCompression {
+	p := new(BackupCompression)
+	*p = x
+	return p
+}
+
+func (x BackupCompression) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (BackupCompression) Descriptor() protoreflect.EnumDescriptor {
+	return file_backup_v1_common_proto_enumTypes[2].Descriptor()
+}
+
+func (BackupCompression) Type() protoreflect.EnumType {
+	return &file_backup_v1_common_proto_enumTypes[2]
+}
+
+func (x BackupCompression) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use BackupCompression.Descriptor instead.
+func (BackupCompression) EnumDescriptor() ([]byte, []int) {
+	return file_backup_v1_common_proto_rawDescGZIP(), []int{2}
+}
+
 // File represents file or folder on a storage.
 type File struct {
 	state protoimpl.MessageState `protogen:"open.v1"`
@@ -394,7 +465,18 @@ const file_backup_v1_common_proto_rawDesc = "" +
 	"\x17BACKUP_MODE_UNSPECIFIED\x10\x00\x12\x18\n" +
 	"\x14BACKUP_MODE_SNAPSHOT\x10\x01\x12\x1b\n" +
 	"\x17BACKUP_MODE_INCREMENTAL\x10\x02\x12\x14\n" +
-	"\x10BACKUP_MODE_PITR\x10\x03B\x90\x01\n" +
+	"\x10BACKUP_MODE_PITR\x10\x03*\xc2\x02\n" +
+	"\x11BackupCompression\x12\"\n" +
+	"\x1eBACKUP_COMPRESSION_UNSPECIFIED\x10\x00\x12\x1e\n" +
+	"\x1aBACKUP_COMPRESSION_DEFAULT\x10\x01\x12\x1b\n" +
+	"\x17BACKUP_COMPRESSION_NONE\x10\x02\x12\x1e\n" +
+	"\x1aBACKUP_COMPRESSION_QUICKLZ\x10\x03\x12\x1b\n" +
+	"\x17BACKUP_COMPRESSION_ZSTD\x10\x04\x12\x1a\n" +
+	"\x16BACKUP_COMPRESSION_LZ4\x10\x05\x12\x19\n" +
+	"\x15BACKUP_COMPRESSION_S2\x10\x06\x12\x1b\n" +
+	"\x17BACKUP_COMPRESSION_GZIP\x10\a\x12\x1d\n" +
+	"\x19BACKUP_COMPRESSION_SNAPPY\x10\b\x12\x1c\n" +
+	"\x18BACKUP_COMPRESSION_PGZIP\x10\tB\x90\x01\n" +
 	"\rcom.backup.v1B\vCommonProtoP\x01Z-github.com/percona/pmm/api/backup/v1;backupv1\xa2\x02\x03BXX\xaa\x02\tBackup.V1\xca\x02\tBackup\\V1\xe2\x02\x15Backup\\V1\\GPBMetadata\xea\x02\n" +
 	"Backup::V1b\x06proto3"
@@ -411,23 +493,24 @@ func file_backup_v1_common_proto_rawDescGZIP() []byte {
 }

 var (
-	file_backup_v1_common_proto_enumTypes = make([]protoimpl.EnumInfo, 2)
+	file_backup_v1_common_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
 	file_backup_v1_common_proto_msgTypes  = make([]protoimpl.MessageInfo, 4)
 	file_backup_v1_common_proto_goTypes   = []any{
 		(DataModel)(0),  // 0: backup.v1.DataModel
 		(BackupMode)(0), // 1: backup.v1.BackupMode
-		(*File)(nil),                  // 2: backup.v1.File
-		(*PbmMetadata)(nil),           // 3: backup.v1.PbmMetadata
-		(*Metadata)(nil),              // 4: backup.v1.Metadata
-		(*LogChunk)(nil),              // 5: backup.v1.LogChunk
-		(*timestamppb.Timestamp)(nil), // 6: google.protobuf.Timestamp
+		(BackupCompression)(0),        // 2: backup.v1.BackupCompression
+		(*File)(nil),                  // 3: backup.v1.File
+		(*PbmMetadata)(nil),           // 4: backup.v1.PbmMetadata
+		(*Metadata)(nil),              // 5: backup.v1.Metadata
+		(*LogChunk)(nil),              // 6: backup.v1.LogChunk
+		(*timestamppb.Timestamp)(nil), // 7: google.protobuf.Timestamp
 	}
 )

 var file_backup_v1_common_proto_depIdxs = []int32{
-	2, // 0: backup.v1.Metadata.file_list:type_name -> backup.v1.File
-	6, // 1: backup.v1.Metadata.restore_to:type_name -> google.protobuf.Timestamp
-	3, // 2: backup.v1.Metadata.pbm_metadata:type_name -> backup.v1.PbmMetadata
+	3, // 0: backup.v1.Metadata.file_list:type_name -> backup.v1.File
+	7, // 1: backup.v1.Metadata.restore_to:type_name -> google.protobuf.Timestamp
+	4, // 2: backup.v1.Metadata.pbm_metadata:type_name -> backup.v1.PbmMetadata
 	3, // [3:3] is the sub-list for method output_type
 	3, // [3:3] is the sub-list for method input_type
 	3, // [3:3] is the sub-list for extension type_name
@@ -448,7 +531,7 @@ func file_backup_v1_common_proto_init() {
 		File: protoimpl.DescBuilder{
 			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 			RawDescriptor: unsafe.Slice(unsafe.StringData(file_backup_v1_common_proto_rawDesc), len(file_backup_v1_common_proto_rawDesc)),
-			NumEnums:      2,
+			NumEnums:      3,
 			NumMessages:   4,
 			NumExtensions: 0,
 			NumServices:   0,
diff --git a/api/backup/v1/common.proto b/api/backup/v1/common.proto
index fe5679fcf81..25ffe91a664 100644
--- a/api/backup/v1/common.proto
+++ b/api/backup/v1/common.proto
@@ -20,6 +20,20 @@ enum BackupMode {
   BACKUP_MODE_PITR = 3;
 }

+// BackupCompression specifies compression
+enum BackupCompression {
+  BACKUP_COMPRESSION_UNSPECIFIED = 0;
+  BACKUP_COMPRESSION_DEFAULT = 1;
+  BACKUP_COMPRESSION_NONE = 2;
+  BACKUP_COMPRESSION_QUICKLZ = 3;
+  BACKUP_COMPRESSION_ZSTD = 4;
+  BACKUP_COMPRESSION_LZ4 = 5;
+  BACKUP_COMPRESSION_S2 = 6;
+  BACKUP_COMPRESSION_GZIP = 7;
+  BACKUP_COMPRESSION_SNAPPY = 8;
+  BACKUP_COMPRESSION_PGZIP = 9;
+}
+
 // File represents file or folder on a storage.
 message File {
   string name = 1 [(validate.rules).string.min_len = 1];
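protoc-gen-go emits the name/value maps and String method shown above for every enum, which makes string round-trips cheap. A quick sketch, again assuming the backupv1 import alias:

package main

import (
	"fmt"

	backupv1 "github.com/percona/pmm/api/backup/v1"
)

func main() {
	c := backupv1.BackupCompression_BACKUP_COMPRESSION_ZSTD
	fmt.Println(c.String()) // "BACKUP_COMPRESSION_ZSTD"

	// Parsing a wire name back into the typed enum via BackupCompression_value.
	if v, ok := backupv1.BackupCompression_value["BACKUP_COMPRESSION_LZ4"]; ok {
		fmt.Println(backupv1.BackupCompression(v)) // BACKUP_COMPRESSION_LZ4
	}
}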
diff --git a/api/backup/v1/json/client/backup_service/backup_service_client.go b/api/backup/v1/json/client/backup_service/backup_service_client.go
index a3417653225..a2e7059ead6 100644
--- a/api/backup/v1/json/client/backup_service/backup_service_client.go
+++ b/api/backup/v1/json/client/backup_service/backup_service_client.go
@@ -68,6 +68,8 @@ type ClientService interface {

 	ListScheduledBackups(params *ListScheduledBackupsParams, opts ...ClientOption) (*ListScheduledBackupsOK, error)

+	ListServiceCompression(params *ListServiceCompressionParams, opts ...ClientOption) (*ListServiceCompressionOK, error)
+
 	RemoveScheduledBackup(params *RemoveScheduledBackupParams, opts ...ClientOption) (*RemoveScheduledBackupOK, error)

 	ScheduleBackup(params *ScheduleBackupParams, opts ...ClientOption) (*ScheduleBackupOK, error)
@@ -385,6 +387,45 @@ func (a *Client) ListScheduledBackups(params *ListScheduledBackupsParams, opts .
 	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
 }

+/*
+ListServiceCompression lists service compression
+
+Return a list of available compression methods for a service.
+*/
+func (a *Client) ListServiceCompression(params *ListServiceCompressionParams, opts ...ClientOption) (*ListServiceCompressionOK, error) {
+	// TODO: Validate the params before sending
+	if params == nil {
+		params = NewListServiceCompressionParams()
+	}
+	op := &runtime.ClientOperation{
+		ID:                 "ListServiceCompression",
+		Method:             "GET",
+		PathPattern:        "/v1/backups/services/{service_id}/compression",
+		ProducesMediaTypes: []string{"application/json"},
+		ConsumesMediaTypes: []string{"application/json"},
+		Schemes:            []string{"http", "https"},
+		Params:             params,
+		Reader:             &ListServiceCompressionReader{formats: a.formats},
+		Context:            params.Context,
+		Client:             params.HTTPClient,
+	}
+	for _, opt := range opts {
+		opt(op)
+	}
+
+	result, err := a.transport.Submit(op)
+	if err != nil {
+		return nil, err
+	}
+	success, ok := result.(*ListServiceCompressionOK)
+	if ok {
+		return success, nil
+	}
+	// unexpected success response
+	unexpectedSuccess := result.(*ListServiceCompressionDefault)
+	return nil, runtime.NewAPIError("unexpected success response: content available as default response in error", unexpectedSuccess, unexpectedSuccess.Code())
+}
+
 /*
 RemoveScheduledBackup removes a scheduled backup
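From the go-swagger side, the call shape mirrors every other operation in this client. A sketch against the generated ClientService interface, leaving the transport wiring out (how svc is constructed is an assumption):

package example

import (
	"fmt"

	"github.com/percona/pmm/api/backup/v1/json/client/backup_service"
)

func listCompression(svc backup_service.ClientService, serviceID string) error {
	params := backup_service.NewListServiceCompressionParams().WithServiceID(serviceID)
	ok, err := svc.ListServiceCompression(params)
	if err != nil {
		return err
	}
	// Payload.CompressionMethods is a []*string of enum names.
	for _, m := range ok.Payload.CompressionMethods {
		if m != nil {
			fmt.Println(*m)
		}
	}
	return nil
}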
diff --git a/api/backup/v1/json/client/backup_service/list_artifacts_responses.go b/api/backup/v1/json/client/backup_service/list_artifacts_responses.go
index 36316157438..99fe63caa0c 100644
--- a/api/backup/v1/json/client/backup_service/list_artifacts_responses.go
+++ b/api/backup/v1/json/client/backup_service/list_artifacts_responses.go
@@ -577,6 +577,10 @@ type ListArtifactsOKBodyArtifactsItems0 struct {

 	// List of artifact metadata.
 	MetadataList []*ListArtifactsOKBodyArtifactsItems0MetadataListItems0 `json:"metadata_list"`
+
+	// BackupCompression specifies compression
+	// Enum: ["BACKUP_COMPRESSION_UNSPECIFIED","BACKUP_COMPRESSION_DEFAULT","BACKUP_COMPRESSION_NONE","BACKUP_COMPRESSION_QUICKLZ","BACKUP_COMPRESSION_ZSTD","BACKUP_COMPRESSION_LZ4","BACKUP_COMPRESSION_S2","BACKUP_COMPRESSION_GZIP","BACKUP_COMPRESSION_SNAPPY","BACKUP_COMPRESSION_PGZIP"]
+	Compression *string `json:"compression,omitempty"`
 }

 // Validate validates this list artifacts OK body artifacts items0
@@ -603,6 +607,10 @@ func (o *ListArtifactsOKBodyArtifactsItems0) Validate(formats strfmt.Registry) e
 		res = append(res, err)
 	}

+	if err := o.validateCompression(formats); err != nil {
+		res = append(res, err)
+	}
+
 	if len(res) > 0 {
 		return errors.CompositeValidationError(res...)
 	}
@@ -807,6 +815,72 @@ func (o *ListArtifactsOKBodyArtifactsItems0) validateMetadataList(formats strfmt
 	return nil
 }

+var listArtifactsOkBodyArtifactsItems0TypeCompressionPropEnum []interface{}
+
+func init() {
+	var res []string
+	if err := json.Unmarshal([]byte(`["BACKUP_COMPRESSION_UNSPECIFIED","BACKUP_COMPRESSION_DEFAULT","BACKUP_COMPRESSION_NONE","BACKUP_COMPRESSION_QUICKLZ","BACKUP_COMPRESSION_ZSTD","BACKUP_COMPRESSION_LZ4","BACKUP_COMPRESSION_S2","BACKUP_COMPRESSION_GZIP","BACKUP_COMPRESSION_SNAPPY","BACKUP_COMPRESSION_PGZIP"]`), &res); err != nil {
+		panic(err)
+	}
+	for _, v := range res {
+		listArtifactsOkBodyArtifactsItems0TypeCompressionPropEnum = append(listArtifactsOkBodyArtifactsItems0TypeCompressionPropEnum, v)
+	}
+}
+
+const (
+
+	// ListArtifactsOKBodyArtifactsItems0CompressionBACKUPCOMPRESSIONUNSPECIFIED captures enum value "BACKUP_COMPRESSION_UNSPECIFIED"
+	ListArtifactsOKBodyArtifactsItems0CompressionBACKUPCOMPRESSIONUNSPECIFIED string = "BACKUP_COMPRESSION_UNSPECIFIED"
+
+	// ListArtifactsOKBodyArtifactsItems0CompressionBACKUPCOMPRESSIONDEFAULT captures enum value "BACKUP_COMPRESSION_DEFAULT"
+	ListArtifactsOKBodyArtifactsItems0CompressionBACKUPCOMPRESSIONDEFAULT string = "BACKUP_COMPRESSION_DEFAULT"
+
+	// ListArtifactsOKBodyArtifactsItems0CompressionBACKUPCOMPRESSIONNONE captures enum value "BACKUP_COMPRESSION_NONE"
+	ListArtifactsOKBodyArtifactsItems0CompressionBACKUPCOMPRESSIONNONE string = "BACKUP_COMPRESSION_NONE"
+
+	// ListArtifactsOKBodyArtifactsItems0CompressionBACKUPCOMPRESSIONQUICKLZ captures enum value "BACKUP_COMPRESSION_QUICKLZ"
+	ListArtifactsOKBodyArtifactsItems0CompressionBACKUPCOMPRESSIONQUICKLZ string = "BACKUP_COMPRESSION_QUICKLZ"
+
+	// ListArtifactsOKBodyArtifactsItems0CompressionBACKUPCOMPRESSIONZSTD captures enum value "BACKUP_COMPRESSION_ZSTD"
+	ListArtifactsOKBodyArtifactsItems0CompressionBACKUPCOMPRESSIONZSTD string = "BACKUP_COMPRESSION_ZSTD"
+
+	// ListArtifactsOKBodyArtifactsItems0CompressionBACKUPCOMPRESSIONLZ4 captures enum value "BACKUP_COMPRESSION_LZ4"
+	ListArtifactsOKBodyArtifactsItems0CompressionBACKUPCOMPRESSIONLZ4 string = "BACKUP_COMPRESSION_LZ4"
+
+	// ListArtifactsOKBodyArtifactsItems0CompressionBACKUPCOMPRESSIONS2 captures enum value "BACKUP_COMPRESSION_S2"
+	ListArtifactsOKBodyArtifactsItems0CompressionBACKUPCOMPRESSIONS2 string = "BACKUP_COMPRESSION_S2"
+
+	// ListArtifactsOKBodyArtifactsItems0CompressionBACKUPCOMPRESSIONGZIP captures enum value "BACKUP_COMPRESSION_GZIP"
+	ListArtifactsOKBodyArtifactsItems0CompressionBACKUPCOMPRESSIONGZIP string = "BACKUP_COMPRESSION_GZIP"
+
+	// ListArtifactsOKBodyArtifactsItems0CompressionBACKUPCOMPRESSIONSNAPPY captures enum value "BACKUP_COMPRESSION_SNAPPY"
+	ListArtifactsOKBodyArtifactsItems0CompressionBACKUPCOMPRESSIONSNAPPY string = "BACKUP_COMPRESSION_SNAPPY"
+
+	// ListArtifactsOKBodyArtifactsItems0CompressionBACKUPCOMPRESSIONPGZIP captures enum value "BACKUP_COMPRESSION_PGZIP"
+	ListArtifactsOKBodyArtifactsItems0CompressionBACKUPCOMPRESSIONPGZIP string = "BACKUP_COMPRESSION_PGZIP"
+)
+
+// prop value enum
+func (o *ListArtifactsOKBodyArtifactsItems0) validateCompressionEnum(path, location string, value string) error {
+	if err := validate.EnumCase(path, location, value, listArtifactsOkBodyArtifactsItems0TypeCompressionPropEnum, true); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (o *ListArtifactsOKBodyArtifactsItems0) validateCompression(formats strfmt.Registry) error {
+	if swag.IsZero(o.Compression) { // not required
+		return nil
+	}
+
+	// value enum
+	if err := o.validateCompressionEnum("compression", "body", *o.Compression); err != nil {
+		return err
+	}
+
+	return nil
+}
+
 // ContextValidate validate this list artifacts OK body artifacts items0 based on the context it is used
 func (o *ListArtifactsOKBodyArtifactsItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
 	var res []error
diff --git a/api/backup/v1/json/client/backup_service/list_scheduled_backups_responses.go b/api/backup/v1/json/client/backup_service/list_scheduled_backups_responses.go
index 838aafb1637..d439284eab0 100644
--- a/api/backup/v1/json/client/backup_service/list_scheduled_backups_responses.go
+++ b/api/backup/v1/json/client/backup_service/list_scheduled_backups_responses.go
@@ -593,6 +593,10 @@ type ListScheduledBackupsOKBodyScheduledBackupsItems0 struct {

 	// How many artifacts keep. 0 - unlimited.
 	Retention int64 `json:"retention,omitempty"`
+
+	// BackupCompression specifies compression
+	// Enum: ["BACKUP_COMPRESSION_UNSPECIFIED","BACKUP_COMPRESSION_DEFAULT","BACKUP_COMPRESSION_NONE","BACKUP_COMPRESSION_QUICKLZ","BACKUP_COMPRESSION_ZSTD","BACKUP_COMPRESSION_LZ4","BACKUP_COMPRESSION_S2","BACKUP_COMPRESSION_GZIP","BACKUP_COMPRESSION_SNAPPY","BACKUP_COMPRESSION_PGZIP"]
+	Compression *string `json:"compression,omitempty"`
 }

 // Validate validates this list scheduled backups OK body scheduled backups items0
@@ -619,6 +623,10 @@ func (o *ListScheduledBackupsOKBodyScheduledBackupsItems0) Validate(formats strf
 		res = append(res, err)
 	}

+	if err := o.validateCompression(formats); err != nil {
+		res = append(res, err)
+	}
+
 	if len(res) > 0 {
 		return errors.CompositeValidationError(res...)
 	}
@@ -754,6 +762,72 @@ func (o *ListScheduledBackupsOKBodyScheduledBackupsItems0) validateNextRun(forma
 	return nil
 }

+var listScheduledBackupsOkBodyScheduledBackupsItems0TypeCompressionPropEnum []interface{}
+
+func init() {
+	var res []string
+	if err := json.Unmarshal([]byte(`["BACKUP_COMPRESSION_UNSPECIFIED","BACKUP_COMPRESSION_DEFAULT","BACKUP_COMPRESSION_NONE","BACKUP_COMPRESSION_QUICKLZ","BACKUP_COMPRESSION_ZSTD","BACKUP_COMPRESSION_LZ4","BACKUP_COMPRESSION_S2","BACKUP_COMPRESSION_GZIP","BACKUP_COMPRESSION_SNAPPY","BACKUP_COMPRESSION_PGZIP"]`), &res); err != nil {
+		panic(err)
+	}
+	for _, v := range res {
+		listScheduledBackupsOkBodyScheduledBackupsItems0TypeCompressionPropEnum = append(listScheduledBackupsOkBodyScheduledBackupsItems0TypeCompressionPropEnum, v)
+	}
+}
+
+const (
+
+	// ListScheduledBackupsOKBodyScheduledBackupsItems0CompressionBACKUPCOMPRESSIONUNSPECIFIED captures enum value "BACKUP_COMPRESSION_UNSPECIFIED"
+	ListScheduledBackupsOKBodyScheduledBackupsItems0CompressionBACKUPCOMPRESSIONUNSPECIFIED string = "BACKUP_COMPRESSION_UNSPECIFIED"
+
+	// ListScheduledBackupsOKBodyScheduledBackupsItems0CompressionBACKUPCOMPRESSIONDEFAULT captures enum value "BACKUP_COMPRESSION_DEFAULT"
+	ListScheduledBackupsOKBodyScheduledBackupsItems0CompressionBACKUPCOMPRESSIONDEFAULT string = "BACKUP_COMPRESSION_DEFAULT"
+
+	// ListScheduledBackupsOKBodyScheduledBackupsItems0CompressionBACKUPCOMPRESSIONNONE captures enum value "BACKUP_COMPRESSION_NONE"
+	ListScheduledBackupsOKBodyScheduledBackupsItems0CompressionBACKUPCOMPRESSIONNONE string = "BACKUP_COMPRESSION_NONE"
+
+	// ListScheduledBackupsOKBodyScheduledBackupsItems0CompressionBACKUPCOMPRESSIONQUICKLZ captures enum value "BACKUP_COMPRESSION_QUICKLZ"
+	ListScheduledBackupsOKBodyScheduledBackupsItems0CompressionBACKUPCOMPRESSIONQUICKLZ string = "BACKUP_COMPRESSION_QUICKLZ"
+
+	// ListScheduledBackupsOKBodyScheduledBackupsItems0CompressionBACKUPCOMPRESSIONZSTD captures enum value "BACKUP_COMPRESSION_ZSTD"
+	ListScheduledBackupsOKBodyScheduledBackupsItems0CompressionBACKUPCOMPRESSIONZSTD string = "BACKUP_COMPRESSION_ZSTD"
+
+	// ListScheduledBackupsOKBodyScheduledBackupsItems0CompressionBACKUPCOMPRESSIONLZ4 captures enum value "BACKUP_COMPRESSION_LZ4"
+	ListScheduledBackupsOKBodyScheduledBackupsItems0CompressionBACKUPCOMPRESSIONLZ4 string = "BACKUP_COMPRESSION_LZ4"
+
+	// ListScheduledBackupsOKBodyScheduledBackupsItems0CompressionBACKUPCOMPRESSIONS2 captures enum value "BACKUP_COMPRESSION_S2"
+	ListScheduledBackupsOKBodyScheduledBackupsItems0CompressionBACKUPCOMPRESSIONS2 string = "BACKUP_COMPRESSION_S2"
+
+	// ListScheduledBackupsOKBodyScheduledBackupsItems0CompressionBACKUPCOMPRESSIONGZIP captures enum value "BACKUP_COMPRESSION_GZIP"
+	ListScheduledBackupsOKBodyScheduledBackupsItems0CompressionBACKUPCOMPRESSIONGZIP string = "BACKUP_COMPRESSION_GZIP"
+
+	// ListScheduledBackupsOKBodyScheduledBackupsItems0CompressionBACKUPCOMPRESSIONSNAPPY captures enum value "BACKUP_COMPRESSION_SNAPPY"
+	ListScheduledBackupsOKBodyScheduledBackupsItems0CompressionBACKUPCOMPRESSIONSNAPPY string = "BACKUP_COMPRESSION_SNAPPY"
+
+	// ListScheduledBackupsOKBodyScheduledBackupsItems0CompressionBACKUPCOMPRESSIONPGZIP captures enum value "BACKUP_COMPRESSION_PGZIP"
+	ListScheduledBackupsOKBodyScheduledBackupsItems0CompressionBACKUPCOMPRESSIONPGZIP string = "BACKUP_COMPRESSION_PGZIP"
+)
+
+// prop value enum
+func (o *ListScheduledBackupsOKBodyScheduledBackupsItems0) validateCompressionEnum(path, location string, value string) error {
+	if err := validate.EnumCase(path, location, value, listScheduledBackupsOkBodyScheduledBackupsItems0TypeCompressionPropEnum, true); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (o *ListScheduledBackupsOKBodyScheduledBackupsItems0) validateCompression(formats strfmt.Registry) error {
+	if swag.IsZero(o.Compression) { // not required
+		return nil
+	}
+
+	// value enum
+	if err := o.validateCompressionEnum("compression", "body", *o.Compression); err != nil {
+		return err
+	}
+
+	return nil
+}
+
 // ContextValidate validates this list scheduled backups OK body scheduled backups items0 based on context it is used
 func (o *ListScheduledBackupsOKBodyScheduledBackupsItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
 	return nil
 }
diff --git a/api/backup/v1/json/client/backup_service/list_service_compression_parameters.go b/api/backup/v1/json/client/backup_service/list_service_compression_parameters.go
new file mode 100644
index 00000000000..5f6ee5f56f9
--- /dev/null
+++ b/api/backup/v1/json/client/backup_service/list_service_compression_parameters.go
@@ -0,0 +1,149 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package backup_service
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"net/http"
+	"time"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	cr "github.com/go-openapi/runtime/client"
+	"github.com/go-openapi/strfmt"
+)
+
+// NewListServiceCompressionParams creates a new ListServiceCompressionParams object,
+// with the default timeout for this client.
+//
+// Default values are not hydrated, since defaults are normally applied by the API server side.
+//
+// To enforce default values in parameter, use SetDefaults or WithDefaults.
+func NewListServiceCompressionParams() *ListServiceCompressionParams {
+	return &ListServiceCompressionParams{
+		timeout: cr.DefaultTimeout,
+	}
+}
+
+// NewListServiceCompressionParamsWithTimeout creates a new ListServiceCompressionParams object
+// with the ability to set a timeout on a request.
+func NewListServiceCompressionParamsWithTimeout(timeout time.Duration) *ListServiceCompressionParams {
+	return &ListServiceCompressionParams{
+		timeout: timeout,
+	}
+}
+
+// NewListServiceCompressionParamsWithContext creates a new ListServiceCompressionParams object
+// with the ability to set a context for a request.
+func NewListServiceCompressionParamsWithContext(ctx context.Context) *ListServiceCompressionParams {
+	return &ListServiceCompressionParams{
+		Context: ctx,
+	}
+}
+
+// NewListServiceCompressionParamsWithHTTPClient creates a new ListServiceCompressionParams object
+// with the ability to set a custom HTTPClient for a request.
+func NewListServiceCompressionParamsWithHTTPClient(client *http.Client) *ListServiceCompressionParams {
+	return &ListServiceCompressionParams{
+		HTTPClient: client,
+	}
+}
+
+/*
+ListServiceCompressionParams contains all the parameters to send to the API endpoint
+
+	for the list service compression operation.
+
+	Typically these are written to a http.Request.
+*/
+type ListServiceCompressionParams struct {
+	/* ServiceID.
+
+	   Service identifier.
+	*/
+	ServiceID string
+
+	timeout    time.Duration
+	Context    context.Context
+	HTTPClient *http.Client
+}
+
+// WithDefaults hydrates default values in the list service compression params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListServiceCompressionParams) WithDefaults() *ListServiceCompressionParams {
+	o.SetDefaults()
+	return o
+}
+
+// SetDefaults hydrates default values in the list service compression params (not the query body).
+//
+// All values with no default are reset to their zero value.
+func (o *ListServiceCompressionParams) SetDefaults() {
+	// no default values defined for this parameter
+}
+
+// WithTimeout adds the timeout to the list service compression params
+func (o *ListServiceCompressionParams) WithTimeout(timeout time.Duration) *ListServiceCompressionParams {
+	o.SetTimeout(timeout)
+	return o
+}
+
+// SetTimeout adds the timeout to the list service compression params
+func (o *ListServiceCompressionParams) SetTimeout(timeout time.Duration) {
+	o.timeout = timeout
+}
+
+// WithContext adds the context to the list service compression params
+func (o *ListServiceCompressionParams) WithContext(ctx context.Context) *ListServiceCompressionParams {
+	o.SetContext(ctx)
+	return o
+}
+
+// SetContext adds the context to the list service compression params
+func (o *ListServiceCompressionParams) SetContext(ctx context.Context) {
+	o.Context = ctx
+}
+
+// WithHTTPClient adds the HTTPClient to the list service compression params
+func (o *ListServiceCompressionParams) WithHTTPClient(client *http.Client) *ListServiceCompressionParams {
+	o.SetHTTPClient(client)
+	return o
+}
+
+// SetHTTPClient adds the HTTPClient to the list service compression params
+func (o *ListServiceCompressionParams) SetHTTPClient(client *http.Client) {
+	o.HTTPClient = client
+}
+
+// WithServiceID adds the serviceID to the list service compression params
+func (o *ListServiceCompressionParams) WithServiceID(serviceID string) *ListServiceCompressionParams {
+	o.SetServiceID(serviceID)
+	return o
+}
+
+// SetServiceID adds the serviceId to the list service compression params
+func (o *ListServiceCompressionParams) SetServiceID(serviceID string) {
+	o.ServiceID = serviceID
+}
+
+// WriteToRequest writes these params to a swagger request
+func (o *ListServiceCompressionParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error {
+	if err := r.SetTimeout(o.timeout); err != nil {
+		return err
+	}
+	var res []error
+
+	// path param service_id
+	if err := r.SetPathParam("service_id", o.ServiceID); err != nil {
+		return err
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
diff --git a/api/backup/v1/json/client/backup_service/list_service_compression_responses.go b/api/backup/v1/json/client/backup_service/list_service_compression_responses.go
new file mode 100644
index 00000000000..ee47172034a
--- /dev/null
+++ b/api/backup/v1/json/client/backup_service/list_service_compression_responses.go
@@ -0,0 +1,493 @@
+// Code generated by go-swagger; DO NOT EDIT.
+
+package backup_service
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"strconv"
+
+	"github.com/go-openapi/errors"
+	"github.com/go-openapi/runtime"
+	"github.com/go-openapi/strfmt"
+	"github.com/go-openapi/swag"
+	"github.com/go-openapi/validate"
+)
+
+// ListServiceCompressionReader is a Reader for the ListServiceCompression structure.
+type ListServiceCompressionReader struct {
+	formats strfmt.Registry
+}
+
+// ReadResponse reads a server response into the received o.
+func (o *ListServiceCompressionReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) {
+	switch response.Code() {
+	case 200:
+		result := NewListServiceCompressionOK()
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		return result, nil
+	default:
+		result := NewListServiceCompressionDefault(response.Code())
+		if err := result.readResponse(response, consumer, o.formats); err != nil {
+			return nil, err
+		}
+		if response.Code()/100 == 2 {
+			return result, nil
+		}
+		return nil, result
+	}
+}
+
+// NewListServiceCompressionOK creates a ListServiceCompressionOK with default headers values
+func NewListServiceCompressionOK() *ListServiceCompressionOK {
+	return &ListServiceCompressionOK{}
+}
+
+/*
+ListServiceCompressionOK describes a response with status code 200, with default header values.
+
+A successful response.
+*/
+type ListServiceCompressionOK struct {
+	Payload *ListServiceCompressionOKBody
+}
+
+// IsSuccess returns true when this list service compression Ok response has a 2xx status code
+func (o *ListServiceCompressionOK) IsSuccess() bool {
+	return true
+}
+
+// IsRedirect returns true when this list service compression Ok response has a 3xx status code
+func (o *ListServiceCompressionOK) IsRedirect() bool {
+	return false
+}
+
+// IsClientError returns true when this list service compression Ok response has a 4xx status code
+func (o *ListServiceCompressionOK) IsClientError() bool {
+	return false
+}
+
+// IsServerError returns true when this list service compression Ok response has a 5xx status code
+func (o *ListServiceCompressionOK) IsServerError() bool {
+	return false
+}
+
+// IsCode returns true when this list service compression Ok response a status code equal to that given
+func (o *ListServiceCompressionOK) IsCode(code int) bool {
+	return code == 200
+}
+
+// Code gets the status code for the list service compression Ok response
+func (o *ListServiceCompressionOK) Code() int {
+	return 200
+}
+
+func (o *ListServiceCompressionOK) Error() string {
+	payload, _ := json.Marshal(o.Payload)
+	return fmt.Sprintf("[GET /v1/backups/services/{service_id}/compression][%d] listServiceCompressionOk %s", 200, payload)
+}
+
+func (o *ListServiceCompressionOK) String() string {
+	payload, _ := json.Marshal(o.Payload)
+	return fmt.Sprintf("[GET /v1/backups/services/{service_id}/compression][%d] listServiceCompressionOk %s", 200, payload)
+}
+
+func (o *ListServiceCompressionOK) GetPayload() *ListServiceCompressionOKBody {
+	return o.Payload
+}
+
+func (o *ListServiceCompressionOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+	o.Payload = new(ListServiceCompressionOKBody)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+// NewListServiceCompressionDefault creates a ListServiceCompressionDefault with default headers values
+func NewListServiceCompressionDefault(code int) *ListServiceCompressionDefault {
+	return &ListServiceCompressionDefault{
+		_statusCode: code,
+	}
+}
+
+/*
+ListServiceCompressionDefault describes a response with status code -1, with default header values.
+
+An unexpected error response.
+*/
+type ListServiceCompressionDefault struct {
+	_statusCode int
+
+	Payload *ListServiceCompressionDefaultBody
+}
+
+// IsSuccess returns true when this list service compression default response has a 2xx status code
+func (o *ListServiceCompressionDefault) IsSuccess() bool {
+	return o._statusCode/100 == 2
+}
+
+// IsRedirect returns true when this list service compression default response has a 3xx status code
+func (o *ListServiceCompressionDefault) IsRedirect() bool {
+	return o._statusCode/100 == 3
+}
+
+// IsClientError returns true when this list service compression default response has a 4xx status code
+func (o *ListServiceCompressionDefault) IsClientError() bool {
+	return o._statusCode/100 == 4
+}
+
+// IsServerError returns true when this list service compression default response has a 5xx status code
+func (o *ListServiceCompressionDefault) IsServerError() bool {
+	return o._statusCode/100 == 5
+}
+
+// IsCode returns true when this list service compression default response a status code equal to that given
+func (o *ListServiceCompressionDefault) IsCode(code int) bool {
+	return o._statusCode == code
+}
+
+// Code gets the status code for the list service compression default response
+func (o *ListServiceCompressionDefault) Code() int {
+	return o._statusCode
+}
+
+func (o *ListServiceCompressionDefault) Error() string {
+	payload, _ := json.Marshal(o.Payload)
+	return fmt.Sprintf("[GET /v1/backups/services/{service_id}/compression][%d] ListServiceCompression default %s", o._statusCode, payload)
+}
+
+func (o *ListServiceCompressionDefault) String() string {
+	payload, _ := json.Marshal(o.Payload)
+	return fmt.Sprintf("[GET /v1/backups/services/{service_id}/compression][%d] ListServiceCompression default %s", o._statusCode, payload)
+}
+
+func (o *ListServiceCompressionDefault) GetPayload() *ListServiceCompressionDefaultBody {
+	return o.Payload
+}
+
+func (o *ListServiceCompressionDefault) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error {
+	o.Payload = new(ListServiceCompressionDefaultBody)
+
+	// response payload
+	if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF {
+		return err
+	}
+
+	return nil
+}
+
+/*
+ListServiceCompressionDefaultBody list service compression default body
+swagger:model ListServiceCompressionDefaultBody
+*/
+type ListServiceCompressionDefaultBody struct {
+	// code
+	Code int32 `json:"code,omitempty"`
+
+	// message
+	Message string `json:"message,omitempty"`
+
+	// details
+	Details []*ListServiceCompressionDefaultBodyDetailsItems0 `json:"details"`
+}
+
+// Validate validates this list service compression default body
+func (o *ListServiceCompressionDefaultBody) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := o.validateDetails(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (o *ListServiceCompressionDefaultBody) validateDetails(formats strfmt.Registry) error {
+	if swag.IsZero(o.Details) { // not required
+		return nil
+	}
+
+	for i := 0; i < len(o.Details); i++ {
+		if swag.IsZero(o.Details[i]) { // not required
+			continue
+		}
+
+		if o.Details[i] != nil {
+			if err := o.Details[i].Validate(formats); err != nil {
+				if ve, ok := err.(*errors.Validation); ok {
+					return ve.ValidateName("ListServiceCompression default" + "." + "details" + "." + strconv.Itoa(i))
+				} else if ce, ok := err.(*errors.CompositeError); ok {
+					return ce.ValidateName("ListServiceCompression default" + "." + "details" + "." + strconv.Itoa(i))
+				}
+				return err
+			}
+		}
+
+	}
+
+	return nil
+}
+
+// ContextValidate validate this list service compression default body based on the context it is used
+func (o *ListServiceCompressionDefaultBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	var res []error
+
+	if err := o.contextValidateDetails(ctx, formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+func (o *ListServiceCompressionDefaultBody) contextValidateDetails(ctx context.Context, formats strfmt.Registry) error {
+	for i := 0; i < len(o.Details); i++ {
+		if o.Details[i] != nil {
+
+			if swag.IsZero(o.Details[i]) { // not required
+				return nil
+			}
+
+			if err := o.Details[i].ContextValidate(ctx, formats); err != nil {
+				if ve, ok := err.(*errors.Validation); ok {
+					return ve.ValidateName("ListServiceCompression default" + "." + "details" + "." + strconv.Itoa(i))
+				} else if ce, ok := err.(*errors.CompositeError); ok {
+					return ce.ValidateName("ListServiceCompression default" + "." + "details" + "." + strconv.Itoa(i))
+				}
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (o *ListServiceCompressionDefaultBody) MarshalBinary() ([]byte, error) {
+	if o == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(o)
+}
+
+// UnmarshalBinary interface implementation
+func (o *ListServiceCompressionDefaultBody) UnmarshalBinary(b []byte) error {
+	var res ListServiceCompressionDefaultBody
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*o = res
+	return nil
+}
+
+/*
+ListServiceCompressionDefaultBodyDetailsItems0 list service compression default body details items0
+swagger:model ListServiceCompressionDefaultBodyDetailsItems0
+*/
+type ListServiceCompressionDefaultBodyDetailsItems0 struct {
+	// at type
+	AtType string `json:"@type,omitempty"`
+
+	// list service compression default body details items0
+	ListServiceCompressionDefaultBodyDetailsItems0 map[string]interface{} `json:"-"`
+}
+
+// UnmarshalJSON unmarshals this object with additional properties from JSON
+func (o *ListServiceCompressionDefaultBodyDetailsItems0) UnmarshalJSON(data []byte) error {
+	// stage 1, bind the properties
+	var stage1 struct {
+		// at type
+		AtType string `json:"@type,omitempty"`
+	}
+	if err := json.Unmarshal(data, &stage1); err != nil {
+		return err
+	}
+	var rcv ListServiceCompressionDefaultBodyDetailsItems0
+
+	rcv.AtType = stage1.AtType
+	*o = rcv
+
+	// stage 2, remove properties and add to map
+	stage2 := make(map[string]json.RawMessage)
+	if err := json.Unmarshal(data, &stage2); err != nil {
+		return err
+	}
+
+	delete(stage2, "@type")
+	// stage 3, add additional properties values
+	if len(stage2) > 0 {
+		result := make(map[string]interface{})
+		for k, v := range stage2 {
+			var toadd interface{}
+			if err := json.Unmarshal(v, &toadd); err != nil {
+				return err
+			}
+			result[k] = toadd
+		}
+		o.ListServiceCompressionDefaultBodyDetailsItems0 = result
+	}
+
+	return nil
+}
+
+// MarshalJSON marshals this object with additional properties into a JSON object
+func (o ListServiceCompressionDefaultBodyDetailsItems0) MarshalJSON() ([]byte, error) {
+	var stage1 struct {
+		// at type
+		AtType string `json:"@type,omitempty"`
+	}
+
+	stage1.AtType = o.AtType
+
+	// make JSON object for known properties
+	props, err := json.Marshal(stage1)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(o.ListServiceCompressionDefaultBodyDetailsItems0) == 0 { // no additional properties
+		return props, nil
+	}
+
+	// make JSON object for the additional properties
+	additional, err := json.Marshal(o.ListServiceCompressionDefaultBodyDetailsItems0)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(props) < 3 { // "{}": only additional properties
+		return additional, nil
+	}
+
+	// concatenate the 2 objects
+	return swag.ConcatJSON(props, additional), nil
+}
+
+// Validate validates this list service compression default body details items0
+func (o *ListServiceCompressionDefaultBodyDetailsItems0) Validate(formats strfmt.Registry) error {
+	return nil
+}
+
+// ContextValidate validates this list service compression default body details items0 based on context it is used
+func (o *ListServiceCompressionDefaultBodyDetailsItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (o *ListServiceCompressionDefaultBodyDetailsItems0) MarshalBinary() ([]byte, error) {
+	if o == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(o)
+}
+
+// UnmarshalBinary interface implementation
+func (o *ListServiceCompressionDefaultBodyDetailsItems0) UnmarshalBinary(b []byte) error {
+	var res ListServiceCompressionDefaultBodyDetailsItems0
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*o = res
+	return nil
+}
+
+/*
+ListServiceCompressionOKBody list service compression OK body
+swagger:model ListServiceCompressionOKBody
+*/
+type ListServiceCompressionOKBody struct {
+	// Available compression methods for the service.
+	CompressionMethods []*string `json:"compression_methods"`
+}
+
+// Validate validates this list service compression OK body
+func (o *ListServiceCompressionOKBody) Validate(formats strfmt.Registry) error {
+	var res []error
+
+	if err := o.validateCompressionMethods(formats); err != nil {
+		res = append(res, err)
+	}
+
+	if len(res) > 0 {
+		return errors.CompositeValidationError(res...)
+	}
+	return nil
+}
+
+var listServiceCompressionOkBodyCompressionMethodsItemsEnum []interface{}
+
+func init() {
+	var res []string
+	if err := json.Unmarshal([]byte(`["BACKUP_COMPRESSION_UNSPECIFIED","BACKUP_COMPRESSION_DEFAULT","BACKUP_COMPRESSION_NONE","BACKUP_COMPRESSION_QUICKLZ","BACKUP_COMPRESSION_ZSTD","BACKUP_COMPRESSION_LZ4","BACKUP_COMPRESSION_S2","BACKUP_COMPRESSION_GZIP","BACKUP_COMPRESSION_SNAPPY","BACKUP_COMPRESSION_PGZIP"]`), &res); err != nil {
+		panic(err)
+	}
+	for _, v := range res {
+		listServiceCompressionOkBodyCompressionMethodsItemsEnum = append(listServiceCompressionOkBodyCompressionMethodsItemsEnum, v)
+	}
+}
+
+func (o *ListServiceCompressionOKBody) validateCompressionMethodsItemsEnum(path, location string, value string) error {
+	if err := validate.EnumCase(path, location, value, listServiceCompressionOkBodyCompressionMethodsItemsEnum, true); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (o *ListServiceCompressionOKBody) validateCompressionMethods(formats strfmt.Registry) error {
+	if swag.IsZero(o.CompressionMethods) { // not required
+		return nil
+	}
+
+	for i := 0; i < len(o.CompressionMethods); i++ {
+		if swag.IsZero(o.CompressionMethods[i]) { // not required
+			continue
+		}
+
+		// value enum
+		if err := o.validateCompressionMethodsItemsEnum("listServiceCompressionOk"+"."+"compression_methods"+"."+strconv.Itoa(i), "body", *o.CompressionMethods[i]); err != nil {
+			return err
+		}
+
+	}
+
+	return nil
+}
+
+// ContextValidate validates this list service compression OK body based on context it is used
+func (o *ListServiceCompressionOKBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error {
+	return nil
+}
+
+// MarshalBinary interface implementation
+func (o *ListServiceCompressionOKBody) MarshalBinary() ([]byte, error) {
+	if o == nil {
+		return nil, nil
+	}
+	return swag.WriteJSON(o)
+}
+
+// UnmarshalBinary interface implementation
+func (o *ListServiceCompressionOKBody) UnmarshalBinary(b []byte) error {
+	var res ListServiceCompressionOKBody
+	if err := swag.ReadJSON(b, &res); err != nil {
+		return err
+	}
+	*o = res
+	return nil
+}
diff --git a/api/backup/v1/json/client/backup_service/schedule_backup_responses.go b/api/backup/v1/json/client/backup_service/schedule_backup_responses.go
index 84b170cf30d..385bdf96b97 100644
--- a/api/backup/v1/json/client/backup_service/schedule_backup_responses.go
+++ b/api/backup/v1/json/client/backup_service/schedule_backup_responses.go
@@ -235,6 +235,10 @@ type ScheduleBackupBody struct {

 	// How many artifacts keep. 0 - unlimited.
 	Retention int64 `json:"retention,omitempty"`
+
+	// BackupCompression specifies compression
+	// Enum: ["BACKUP_COMPRESSION_UNSPECIFIED","BACKUP_COMPRESSION_DEFAULT","BACKUP_COMPRESSION_NONE","BACKUP_COMPRESSION_QUICKLZ","BACKUP_COMPRESSION_ZSTD","BACKUP_COMPRESSION_LZ4","BACKUP_COMPRESSION_S2","BACKUP_COMPRESSION_GZIP","BACKUP_COMPRESSION_SNAPPY","BACKUP_COMPRESSION_PGZIP"]
+	Compression *string `json:"compression,omitempty"`
 }

 // Validate validates this schedule backup body
@@ -253,6 +257,10 @@ func (o *ScheduleBackupBody) Validate(formats strfmt.Registry) error {
 		res = append(res, err)
 	}

+	if err := o.validateCompression(formats); err != nil {
+		res = append(res, err)
+	}
+
 	if len(res) > 0 {
 		return errors.CompositeValidationError(res...)
} @@ -364,6 +372,72 @@ func (o *ScheduleBackupBody) validateDataModel(formats strfmt.Registry) error { return nil } +var scheduleBackupBodyTypeCompressionPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["BACKUP_COMPRESSION_UNSPECIFIED","BACKUP_COMPRESSION_DEFAULT","BACKUP_COMPRESSION_NONE","BACKUP_COMPRESSION_QUICKLZ","BACKUP_COMPRESSION_ZSTD","BACKUP_COMPRESSION_LZ4","BACKUP_COMPRESSION_S2","BACKUP_COMPRESSION_GZIP","BACKUP_COMPRESSION_SNAPPY","BACKUP_COMPRESSION_PGZIP"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + scheduleBackupBodyTypeCompressionPropEnum = append(scheduleBackupBodyTypeCompressionPropEnum, v) + } +} + +const ( + + // ScheduleBackupBodyCompressionBACKUPCOMPRESSIONUNSPECIFIED captures enum value "BACKUP_COMPRESSION_UNSPECIFIED" + ScheduleBackupBodyCompressionBACKUPCOMPRESSIONUNSPECIFIED string = "BACKUP_COMPRESSION_UNSPECIFIED" + + // ScheduleBackupBodyCompressionBACKUPCOMPRESSIONDEFAULT captures enum value "BACKUP_COMPRESSION_DEFAULT" + ScheduleBackupBodyCompressionBACKUPCOMPRESSIONDEFAULT string = "BACKUP_COMPRESSION_DEFAULT" + + // ScheduleBackupBodyCompressionBACKUPCOMPRESSIONNONE captures enum value "BACKUP_COMPRESSION_NONE" + ScheduleBackupBodyCompressionBACKUPCOMPRESSIONNONE string = "BACKUP_COMPRESSION_NONE" + + // ScheduleBackupBodyCompressionBACKUPCOMPRESSIONQUICKLZ captures enum value "BACKUP_COMPRESSION_QUICKLZ" + ScheduleBackupBodyCompressionBACKUPCOMPRESSIONQUICKLZ string = "BACKUP_COMPRESSION_QUICKLZ" + + // ScheduleBackupBodyCompressionBACKUPCOMPRESSIONZSTD captures enum value "BACKUP_COMPRESSION_ZSTD" + ScheduleBackupBodyCompressionBACKUPCOMPRESSIONZSTD string = "BACKUP_COMPRESSION_ZSTD" + + // ScheduleBackupBodyCompressionBACKUPCOMPRESSIONLZ4 captures enum value "BACKUP_COMPRESSION_LZ4" + ScheduleBackupBodyCompressionBACKUPCOMPRESSIONLZ4 string = "BACKUP_COMPRESSION_LZ4" + + // ScheduleBackupBodyCompressionBACKUPCOMPRESSIONS2 captures enum value "BACKUP_COMPRESSION_S2" + ScheduleBackupBodyCompressionBACKUPCOMPRESSIONS2 string = "BACKUP_COMPRESSION_S2" + + // ScheduleBackupBodyCompressionBACKUPCOMPRESSIONGZIP captures enum value "BACKUP_COMPRESSION_GZIP" + ScheduleBackupBodyCompressionBACKUPCOMPRESSIONGZIP string = "BACKUP_COMPRESSION_GZIP" + + // ScheduleBackupBodyCompressionBACKUPCOMPRESSIONSNAPPY captures enum value "BACKUP_COMPRESSION_SNAPPY" + ScheduleBackupBodyCompressionBACKUPCOMPRESSIONSNAPPY string = "BACKUP_COMPRESSION_SNAPPY" + + // ScheduleBackupBodyCompressionBACKUPCOMPRESSIONPGZIP captures enum value "BACKUP_COMPRESSION_PGZIP" + ScheduleBackupBodyCompressionBACKUPCOMPRESSIONPGZIP string = "BACKUP_COMPRESSION_PGZIP" +) + +// prop value enum +func (o *ScheduleBackupBody) validateCompressionEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, scheduleBackupBodyTypeCompressionPropEnum, true); err != nil { + return err + } + return nil +} + +func (o *ScheduleBackupBody) validateCompression(formats strfmt.Registry) error { + if swag.IsZero(o.Compression) { // not required + return nil + } + + // value enum + if err := o.validateCompressionEnum("body"+"."+"compression", "body", *o.Compression); err != nil { + return err + } + + return nil +} + // ContextValidate validates this schedule backup body based on context it is used func (o *ScheduleBackupBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { return nil diff --git a/api/backup/v1/json/client/backup_service/start_backup_responses.go 
b/api/backup/v1/json/client/backup_service/start_backup_responses.go index 3137ce1bc85..4ab939d92bc 100644 --- a/api/backup/v1/json/client/backup_service/start_backup_responses.go +++ b/api/backup/v1/json/client/backup_service/start_backup_responses.go @@ -217,6 +217,10 @@ type StartBackupBody struct { // Folder on storage for artifact. Folder string `json:"folder,omitempty"` + + // BackupCompression specifies compression + // Enum: ["BACKUP_COMPRESSION_UNSPECIFIED","BACKUP_COMPRESSION_DEFAULT","BACKUP_COMPRESSION_NONE","BACKUP_COMPRESSION_QUICKLZ","BACKUP_COMPRESSION_ZSTD","BACKUP_COMPRESSION_LZ4","BACKUP_COMPRESSION_S2","BACKUP_COMPRESSION_GZIP","BACKUP_COMPRESSION_SNAPPY","BACKUP_COMPRESSION_PGZIP"] + Compression *string `json:"compression,omitempty"` } // Validate validates this start backup body @@ -227,6 +231,10 @@ func (o *StartBackupBody) Validate(formats strfmt.Registry) error { res = append(res, err) } + if err := o.validateCompression(formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { return errors.CompositeValidationError(res...) } @@ -278,6 +286,72 @@ func (o *StartBackupBody) validateDataModel(formats strfmt.Registry) error { return nil } +var startBackupBodyTypeCompressionPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["BACKUP_COMPRESSION_UNSPECIFIED","BACKUP_COMPRESSION_DEFAULT","BACKUP_COMPRESSION_NONE","BACKUP_COMPRESSION_QUICKLZ","BACKUP_COMPRESSION_ZSTD","BACKUP_COMPRESSION_LZ4","BACKUP_COMPRESSION_S2","BACKUP_COMPRESSION_GZIP","BACKUP_COMPRESSION_SNAPPY","BACKUP_COMPRESSION_PGZIP"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + startBackupBodyTypeCompressionPropEnum = append(startBackupBodyTypeCompressionPropEnum, v) + } +} + +const ( + + // StartBackupBodyCompressionBACKUPCOMPRESSIONUNSPECIFIED captures enum value "BACKUP_COMPRESSION_UNSPECIFIED" + StartBackupBodyCompressionBACKUPCOMPRESSIONUNSPECIFIED string = "BACKUP_COMPRESSION_UNSPECIFIED" + + // StartBackupBodyCompressionBACKUPCOMPRESSIONDEFAULT captures enum value "BACKUP_COMPRESSION_DEFAULT" + StartBackupBodyCompressionBACKUPCOMPRESSIONDEFAULT string = "BACKUP_COMPRESSION_DEFAULT" + + // StartBackupBodyCompressionBACKUPCOMPRESSIONNONE captures enum value "BACKUP_COMPRESSION_NONE" + StartBackupBodyCompressionBACKUPCOMPRESSIONNONE string = "BACKUP_COMPRESSION_NONE" + + // StartBackupBodyCompressionBACKUPCOMPRESSIONQUICKLZ captures enum value "BACKUP_COMPRESSION_QUICKLZ" + StartBackupBodyCompressionBACKUPCOMPRESSIONQUICKLZ string = "BACKUP_COMPRESSION_QUICKLZ" + + // StartBackupBodyCompressionBACKUPCOMPRESSIONZSTD captures enum value "BACKUP_COMPRESSION_ZSTD" + StartBackupBodyCompressionBACKUPCOMPRESSIONZSTD string = "BACKUP_COMPRESSION_ZSTD" + + // StartBackupBodyCompressionBACKUPCOMPRESSIONLZ4 captures enum value "BACKUP_COMPRESSION_LZ4" + StartBackupBodyCompressionBACKUPCOMPRESSIONLZ4 string = "BACKUP_COMPRESSION_LZ4" + + // StartBackupBodyCompressionBACKUPCOMPRESSIONS2 captures enum value "BACKUP_COMPRESSION_S2" + StartBackupBodyCompressionBACKUPCOMPRESSIONS2 string = "BACKUP_COMPRESSION_S2" + + // StartBackupBodyCompressionBACKUPCOMPRESSIONGZIP captures enum value "BACKUP_COMPRESSION_GZIP" + StartBackupBodyCompressionBACKUPCOMPRESSIONGZIP string = "BACKUP_COMPRESSION_GZIP" + + // StartBackupBodyCompressionBACKUPCOMPRESSIONSNAPPY captures enum value "BACKUP_COMPRESSION_SNAPPY" + StartBackupBodyCompressionBACKUPCOMPRESSIONSNAPPY string = "BACKUP_COMPRESSION_SNAPPY" + + // StartBackupBodyCompressionBACKUPCOMPRESSIONPGZIP 
captures enum value "BACKUP_COMPRESSION_PGZIP" + StartBackupBodyCompressionBACKUPCOMPRESSIONPGZIP string = "BACKUP_COMPRESSION_PGZIP" +) + +// prop value enum +func (o *StartBackupBody) validateCompressionEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, startBackupBodyTypeCompressionPropEnum, true); err != nil { + return err + } + return nil +} + +func (o *StartBackupBody) validateCompression(formats strfmt.Registry) error { + if swag.IsZero(o.Compression) { // not required + return nil + } + + // value enum + if err := o.validateCompressionEnum("body"+"."+"compression", "body", *o.Compression); err != nil { + return err + } + + return nil +} + // ContextValidate validates this start backup body based on context it is used func (o *StartBackupBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { return nil diff --git a/api/backup/v1/json/client/restore_service/list_restores_responses.go b/api/backup/v1/json/client/restore_service/list_restores_responses.go index d3b45468b2f..d785d7459de 100644 --- a/api/backup/v1/json/client/restore_service/list_restores_responses.go +++ b/api/backup/v1/json/client/restore_service/list_restores_responses.go @@ -575,6 +575,10 @@ type ListRestoresOKBodyItemsItems0 struct { // PITR timestamp is filled for PITR restores, empty otherwise. // Format: date-time PitrTimestamp strfmt.DateTime `json:"pitr_timestamp,omitempty"` + + // BackupCompression specifies compression + // Enum: ["BACKUP_COMPRESSION_UNSPECIFIED","BACKUP_COMPRESSION_DEFAULT","BACKUP_COMPRESSION_NONE","BACKUP_COMPRESSION_QUICKLZ","BACKUP_COMPRESSION_ZSTD","BACKUP_COMPRESSION_LZ4","BACKUP_COMPRESSION_S2","BACKUP_COMPRESSION_GZIP","BACKUP_COMPRESSION_SNAPPY","BACKUP_COMPRESSION_PGZIP"] + Compression *string `json:"compression,omitempty"` } // Validate validates this list restores OK body items items0 @@ -601,6 +605,10 @@ func (o *ListRestoresOKBodyItemsItems0) Validate(formats strfmt.Registry) error res = append(res, err) } + if err := o.validateCompression(formats); err != nil { + res = append(res, err) + } + if len(res) > 0 { return errors.CompositeValidationError(res...) 
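Since the generated `Compression` field is a `*string`, a caller sets it by taking the address of one of the constants above; leaving it `nil` means "not specified" and skips the enum check entirely. A minimal sketch under the same import-path assumption as before:

```go
package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"

	backupservice "github.com/percona/pmm/api/backup/v1/json/client/backup_service"
)

func main() {
	// The generated constant avoids hand-typing the enum string.
	c := backupservice.StartBackupBodyCompressionBACKUPCOMPRESSIONLZ4

	body := backupservice.StartBackupBody{Compression: &c}
	fmt.Println(body.Validate(strfmt.Default)) // <nil>: LZ4 is in the enum

	// An empty body also validates: validateCompression treats a nil
	// pointer as "not required" and returns early.
	empty := backupservice.StartBackupBody{}
	fmt.Println(empty.Validate(strfmt.Default)) // <nil>
}
```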
} @@ -736,6 +744,72 @@ func (o *ListRestoresOKBodyItemsItems0) validatePitrTimestamp(formats strfmt.Reg return nil } +var listRestoresOkBodyItemsItems0TypeCompressionPropEnum []interface{} + +func init() { + var res []string + if err := json.Unmarshal([]byte(`["BACKUP_COMPRESSION_UNSPECIFIED","BACKUP_COMPRESSION_DEFAULT","BACKUP_COMPRESSION_NONE","BACKUP_COMPRESSION_QUICKLZ","BACKUP_COMPRESSION_ZSTD","BACKUP_COMPRESSION_LZ4","BACKUP_COMPRESSION_S2","BACKUP_COMPRESSION_GZIP","BACKUP_COMPRESSION_SNAPPY","BACKUP_COMPRESSION_PGZIP"]`), &res); err != nil { + panic(err) + } + for _, v := range res { + listRestoresOkBodyItemsItems0TypeCompressionPropEnum = append(listRestoresOkBodyItemsItems0TypeCompressionPropEnum, v) + } +} + +const ( + + // ListRestoresOKBodyItemsItems0CompressionBACKUPCOMPRESSIONUNSPECIFIED captures enum value "BACKUP_COMPRESSION_UNSPECIFIED" + ListRestoresOKBodyItemsItems0CompressionBACKUPCOMPRESSIONUNSPECIFIED string = "BACKUP_COMPRESSION_UNSPECIFIED" + + // ListRestoresOKBodyItemsItems0CompressionBACKUPCOMPRESSIONDEFAULT captures enum value "BACKUP_COMPRESSION_DEFAULT" + ListRestoresOKBodyItemsItems0CompressionBACKUPCOMPRESSIONDEFAULT string = "BACKUP_COMPRESSION_DEFAULT" + + // ListRestoresOKBodyItemsItems0CompressionBACKUPCOMPRESSIONNONE captures enum value "BACKUP_COMPRESSION_NONE" + ListRestoresOKBodyItemsItems0CompressionBACKUPCOMPRESSIONNONE string = "BACKUP_COMPRESSION_NONE" + + // ListRestoresOKBodyItemsItems0CompressionBACKUPCOMPRESSIONQUICKLZ captures enum value "BACKUP_COMPRESSION_QUICKLZ" + ListRestoresOKBodyItemsItems0CompressionBACKUPCOMPRESSIONQUICKLZ string = "BACKUP_COMPRESSION_QUICKLZ" + + // ListRestoresOKBodyItemsItems0CompressionBACKUPCOMPRESSIONZSTD captures enum value "BACKUP_COMPRESSION_ZSTD" + ListRestoresOKBodyItemsItems0CompressionBACKUPCOMPRESSIONZSTD string = "BACKUP_COMPRESSION_ZSTD" + + // ListRestoresOKBodyItemsItems0CompressionBACKUPCOMPRESSIONLZ4 captures enum value "BACKUP_COMPRESSION_LZ4" + ListRestoresOKBodyItemsItems0CompressionBACKUPCOMPRESSIONLZ4 string = "BACKUP_COMPRESSION_LZ4" + + // ListRestoresOKBodyItemsItems0CompressionBACKUPCOMPRESSIONS2 captures enum value "BACKUP_COMPRESSION_S2" + ListRestoresOKBodyItemsItems0CompressionBACKUPCOMPRESSIONS2 string = "BACKUP_COMPRESSION_S2" + + // ListRestoresOKBodyItemsItems0CompressionBACKUPCOMPRESSIONGZIP captures enum value "BACKUP_COMPRESSION_GZIP" + ListRestoresOKBodyItemsItems0CompressionBACKUPCOMPRESSIONGZIP string = "BACKUP_COMPRESSION_GZIP" + + // ListRestoresOKBodyItemsItems0CompressionBACKUPCOMPRESSIONSNAPPY captures enum value "BACKUP_COMPRESSION_SNAPPY" + ListRestoresOKBodyItemsItems0CompressionBACKUPCOMPRESSIONSNAPPY string = "BACKUP_COMPRESSION_SNAPPY" + + // ListRestoresOKBodyItemsItems0CompressionBACKUPCOMPRESSIONPGZIP captures enum value "BACKUP_COMPRESSION_PGZIP" + ListRestoresOKBodyItemsItems0CompressionBACKUPCOMPRESSIONPGZIP string = "BACKUP_COMPRESSION_PGZIP" +) + +// prop value enum +func (o *ListRestoresOKBodyItemsItems0) validateCompressionEnum(path, location string, value string) error { + if err := validate.EnumCase(path, location, value, listRestoresOkBodyItemsItems0TypeCompressionPropEnum, true); err != nil { + return err + } + return nil +} + +func (o *ListRestoresOKBodyItemsItems0) validateCompression(formats strfmt.Registry) error { + if swag.IsZero(o.Compression) { // not required + return nil + } + + // value enum + if err := o.validateCompressionEnum("compression", "body", *o.Compression); err != nil { + return err + } + + return nil +} + // ContextValidate 
validates this list restores OK body items items0 based on context it is used func (o *ListRestoresOKBodyItemsItems0) ContextValidate(ctx context.Context, formats strfmt.Registry) error { return nil diff --git a/api/backup/v1/json/v1.json b/api/backup/v1/json/v1.json index c599ce356ce..f16e19f0789 100644 --- a/api/backup/v1/json/v1.json +++ b/api/backup/v1/json/v1.json @@ -173,6 +173,24 @@ } }, "x-order": 13 + }, + "compression": { + "type": "string", + "title": "BackupCompression specifies compression", + "default": "BACKUP_COMPRESSION_UNSPECIFIED", + "enum": [ + "BACKUP_COMPRESSION_UNSPECIFIED", + "BACKUP_COMPRESSION_DEFAULT", + "BACKUP_COMPRESSION_NONE", + "BACKUP_COMPRESSION_QUICKLZ", + "BACKUP_COMPRESSION_ZSTD", + "BACKUP_COMPRESSION_LZ4", + "BACKUP_COMPRESSION_S2", + "BACKUP_COMPRESSION_GZIP", + "BACKUP_COMPRESSION_SNAPPY", + "BACKUP_COMPRESSION_PGZIP" + ], + "x-order": 14 } } }, @@ -947,6 +965,24 @@ "type": "string", "format": "date-time", "x-order": 12 + }, + "compression": { + "type": "string", + "title": "BackupCompression specifies compression", + "default": "BACKUP_COMPRESSION_UNSPECIFIED", + "enum": [ + "BACKUP_COMPRESSION_UNSPECIFIED", + "BACKUP_COMPRESSION_DEFAULT", + "BACKUP_COMPRESSION_NONE", + "BACKUP_COMPRESSION_QUICKLZ", + "BACKUP_COMPRESSION_ZSTD", + "BACKUP_COMPRESSION_LZ4", + "BACKUP_COMPRESSION_S2", + "BACKUP_COMPRESSION_GZIP", + "BACKUP_COMPRESSION_SNAPPY", + "BACKUP_COMPRESSION_PGZIP" + ], + "x-order": 13 } } }, @@ -1299,6 +1335,24 @@ "type": "integer", "format": "int64", "x-order": 18 + }, + "compression": { + "type": "string", + "title": "BackupCompression specifies compression", + "default": "BACKUP_COMPRESSION_UNSPECIFIED", + "enum": [ + "BACKUP_COMPRESSION_UNSPECIFIED", + "BACKUP_COMPRESSION_DEFAULT", + "BACKUP_COMPRESSION_NONE", + "BACKUP_COMPRESSION_QUICKLZ", + "BACKUP_COMPRESSION_ZSTD", + "BACKUP_COMPRESSION_LZ4", + "BACKUP_COMPRESSION_S2", + "BACKUP_COMPRESSION_GZIP", + "BACKUP_COMPRESSION_SNAPPY", + "BACKUP_COMPRESSION_PGZIP" + ], + "x-order": 19 } } }, @@ -1341,6 +1395,88 @@ } } }, + "/v1/backups/services/{service_id}/compression": { + "get": { + "description": "Return a list of available compression methods for a service.", + "tags": [ + "BackupService" + ], + "summary": "List Service Compression", + "operationId": "ListServiceCompression", + "parameters": [ + { + "type": "string", + "description": "Service identifier.", + "name": "service_id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "type": "object", + "properties": { + "compression_methods": { + "description": "Available compression methods for the service.", + "type": "array", + "items": { + "type": "string", + "title": "BackupCompression specifies compression", + "default": "BACKUP_COMPRESSION_UNSPECIFIED", + "enum": [ + "BACKUP_COMPRESSION_UNSPECIFIED", + "BACKUP_COMPRESSION_DEFAULT", + "BACKUP_COMPRESSION_NONE", + "BACKUP_COMPRESSION_QUICKLZ", + "BACKUP_COMPRESSION_ZSTD", + "BACKUP_COMPRESSION_LZ4", + "BACKUP_COMPRESSION_S2", + "BACKUP_COMPRESSION_GZIP", + "BACKUP_COMPRESSION_SNAPPY", + "BACKUP_COMPRESSION_PGZIP" + ] + }, + "x-order": 0 + } + } + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32", + "x-order": 0 + }, + "message": { + "type": "string", + "x-order": 1 + }, + "details": { + "type": "array", + "items": { + "type": "object", + "properties": { + "@type": { + "type": 
"string", + "x-order": 0 + } + }, + "additionalProperties": {} + }, + "x-order": 2 + } + } + } + } + } + } + }, "/v1/backups/{artifact_id}/compatible-services": { "get": { "description": "List services that are compatible with the backup artifact.", @@ -1906,6 +2042,24 @@ "type": "integer", "format": "int64", "x-order": 12 + }, + "compression": { + "type": "string", + "title": "BackupCompression specifies compression", + "default": "BACKUP_COMPRESSION_UNSPECIFIED", + "enum": [ + "BACKUP_COMPRESSION_UNSPECIFIED", + "BACKUP_COMPRESSION_DEFAULT", + "BACKUP_COMPRESSION_NONE", + "BACKUP_COMPRESSION_QUICKLZ", + "BACKUP_COMPRESSION_ZSTD", + "BACKUP_COMPRESSION_LZ4", + "BACKUP_COMPRESSION_S2", + "BACKUP_COMPRESSION_GZIP", + "BACKUP_COMPRESSION_SNAPPY", + "BACKUP_COMPRESSION_PGZIP" + ], + "x-order": 13 } } } @@ -2020,6 +2174,24 @@ "description": "Folder on storage for artifact.", "type": "string", "x-order": 7 + }, + "compression": { + "type": "string", + "title": "BackupCompression specifies compression", + "default": "BACKUP_COMPRESSION_UNSPECIFIED", + "enum": [ + "BACKUP_COMPRESSION_UNSPECIFIED", + "BACKUP_COMPRESSION_DEFAULT", + "BACKUP_COMPRESSION_NONE", + "BACKUP_COMPRESSION_QUICKLZ", + "BACKUP_COMPRESSION_ZSTD", + "BACKUP_COMPRESSION_LZ4", + "BACKUP_COMPRESSION_S2", + "BACKUP_COMPRESSION_GZIP", + "BACKUP_COMPRESSION_SNAPPY", + "BACKUP_COMPRESSION_PGZIP" + ], + "x-order": 8 } } } diff --git a/api/backup/v1/restores.pb.go b/api/backup/v1/restores.pb.go index 756d4b373bf..04546e1b322 100644 --- a/api/backup/v1/restores.pb.go +++ b/api/backup/v1/restores.pb.go @@ -108,6 +108,8 @@ type RestoreHistoryItem struct { FinishedAt *timestamppb.Timestamp `protobuf:"bytes,12,opt,name=finished_at,json=finishedAt,proto3" json:"finished_at,omitempty"` // PITR timestamp is filled for PITR restores, empty otherwise. 
PitrTimestamp *timestamppb.Timestamp `protobuf:"bytes,13,opt,name=pitr_timestamp,json=pitrTimestamp,proto3" json:"pitr_timestamp,omitempty"` + // Compression + Compression BackupCompression `protobuf:"varint,14,opt,name=compression,proto3,enum=backup.v1.BackupCompression" json:"compression,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -233,6 +235,13 @@ func (x *RestoreHistoryItem) GetPitrTimestamp() *timestamppb.Timestamp { return nil } +func (x *RestoreHistoryItem) GetCompression() BackupCompression { + if x != nil { + return x.Compression + } + return BackupCompression_BACKUP_COMPRESSION_UNSPECIFIED +} + type ListRestoresRequest struct { state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields @@ -537,7 +546,7 @@ var File_backup_v1_restores_proto protoreflect.FileDescriptor const file_backup_v1_restores_proto_rawDesc = "" + "\n" + - "\x18backup/v1/restores.proto\x12\tbackup.v1\x1a\x16backup/v1/common.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a.protoc-gen-openapiv2/options/annotations.proto\x1a\x17validate/validate.proto\"\xaa\x04\n" + + "\x18backup/v1/restores.proto\x12\tbackup.v1\x1a\x16backup/v1/common.proto\x1a\x1cgoogle/api/annotations.proto\x1a\x1fgoogle/protobuf/timestamp.proto\x1a.protoc-gen-openapiv2/options/annotations.proto\x1a\x17validate/validate.proto\"\xea\x04\n" + "\x12RestoreHistoryItem\x12\x1d\n" + "\n" + "restore_id\x18\x01 \x01(\tR\trestoreId\x12\x1f\n" + @@ -559,7 +568,8 @@ const file_backup_v1_restores_proto_rawDesc = "" + "started_at\x18\v \x01(\v2\x1a.google.protobuf.TimestampR\tstartedAt\x12;\n" + "\vfinished_at\x18\f \x01(\v2\x1a.google.protobuf.TimestampR\n" + "finishedAt\x12A\n" + - "\x0epitr_timestamp\x18\r \x01(\v2\x1a.google.protobuf.TimestampR\rpitrTimestamp\"\x15\n" + + "\x0epitr_timestamp\x18\r \x01(\v2\x1a.google.protobuf.TimestampR\rpitrTimestamp\x12>\n" + + "\vcompression\x18\x0e \x01(\x0e2\x1c.backup.v1.BackupCompressionR\vcompression\"\x15\n" + "\x13ListRestoresRequest\"K\n" + "\x14ListRestoresResponse\x123\n" + "\x05items\x18\x01 \x03(\v2\x1d.backup.v1.RestoreHistoryItemR\x05items\"t\n" + @@ -622,7 +632,8 @@ var ( (*RestoreBackupResponse)(nil), // 7: backup.v1.RestoreBackupResponse (DataModel)(0), // 8: backup.v1.DataModel (*timestamppb.Timestamp)(nil), // 9: google.protobuf.Timestamp - (*LogChunk)(nil), // 10: backup.v1.LogChunk + (BackupCompression)(0), // 10: backup.v1.BackupCompression + (*LogChunk)(nil), // 11: backup.v1.LogChunk } ) @@ -632,20 +643,21 @@ var file_backup_v1_restores_proto_depIdxs = []int32{ 9, // 2: backup.v1.RestoreHistoryItem.started_at:type_name -> google.protobuf.Timestamp 9, // 3: backup.v1.RestoreHistoryItem.finished_at:type_name -> google.protobuf.Timestamp 9, // 4: backup.v1.RestoreHistoryItem.pitr_timestamp:type_name -> google.protobuf.Timestamp - 1, // 5: backup.v1.ListRestoresResponse.items:type_name -> backup.v1.RestoreHistoryItem - 10, // 6: backup.v1.RestoreServiceGetLogsResponse.logs:type_name -> backup.v1.LogChunk - 9, // 7: backup.v1.RestoreBackupRequest.pitr_timestamp:type_name -> google.protobuf.Timestamp - 2, // 8: backup.v1.RestoreService.ListRestores:input_type -> backup.v1.ListRestoresRequest - 4, // 9: backup.v1.RestoreService.GetLogs:input_type -> backup.v1.RestoreServiceGetLogsRequest - 6, // 10: backup.v1.RestoreService.RestoreBackup:input_type -> backup.v1.RestoreBackupRequest - 3, // 11: backup.v1.RestoreService.ListRestores:output_type -> backup.v1.ListRestoresResponse - 5, // 12: 
backup.v1.RestoreService.GetLogs:output_type -> backup.v1.RestoreServiceGetLogsResponse - 7, // 13: backup.v1.RestoreService.RestoreBackup:output_type -> backup.v1.RestoreBackupResponse - 11, // [11:14] is the sub-list for method output_type - 8, // [8:11] is the sub-list for method input_type - 8, // [8:8] is the sub-list for extension type_name - 8, // [8:8] is the sub-list for extension extendee - 0, // [0:8] is the sub-list for field type_name + 10, // 5: backup.v1.RestoreHistoryItem.compression:type_name -> backup.v1.BackupCompression + 1, // 6: backup.v1.ListRestoresResponse.items:type_name -> backup.v1.RestoreHistoryItem + 11, // 7: backup.v1.RestoreServiceGetLogsResponse.logs:type_name -> backup.v1.LogChunk + 9, // 8: backup.v1.RestoreBackupRequest.pitr_timestamp:type_name -> google.protobuf.Timestamp + 2, // 9: backup.v1.RestoreService.ListRestores:input_type -> backup.v1.ListRestoresRequest + 4, // 10: backup.v1.RestoreService.GetLogs:input_type -> backup.v1.RestoreServiceGetLogsRequest + 6, // 11: backup.v1.RestoreService.RestoreBackup:input_type -> backup.v1.RestoreBackupRequest + 3, // 12: backup.v1.RestoreService.ListRestores:output_type -> backup.v1.ListRestoresResponse + 5, // 13: backup.v1.RestoreService.GetLogs:output_type -> backup.v1.RestoreServiceGetLogsResponse + 7, // 14: backup.v1.RestoreService.RestoreBackup:output_type -> backup.v1.RestoreBackupResponse + 12, // [12:15] is the sub-list for method output_type + 9, // [9:12] is the sub-list for method input_type + 9, // [9:9] is the sub-list for extension type_name + 9, // [9:9] is the sub-list for extension extendee + 0, // [0:9] is the sub-list for field type_name } func init() { file_backup_v1_restores_proto_init() } diff --git a/api/backup/v1/restores.pb.validate.go b/api/backup/v1/restores.pb.validate.go index 0f530c1fc99..dd13e827552 100644 --- a/api/backup/v1/restores.pb.validate.go +++ b/api/backup/v1/restores.pb.validate.go @@ -164,6 +164,8 @@ func (m *RestoreHistoryItem) validate(all bool) error { } } + // no validation rules for Compression + if len(errors) > 0 { return RestoreHistoryItemMultiError(errors) } diff --git a/api/backup/v1/restores.proto b/api/backup/v1/restores.proto index 21b9d7906ec..8230c0dc458 100644 --- a/api/backup/v1/restores.proto +++ b/api/backup/v1/restores.proto @@ -44,6 +44,8 @@ message RestoreHistoryItem { google.protobuf.Timestamp finished_at = 12; // PITR timestamp is filled for PITR restores, empty otherwise. 
google.protobuf.Timestamp pitr_timestamp = 13; + // Compression + BackupCompression compression = 14; } message ListRestoresRequest {} diff --git a/api/swagger/swagger-dev.json b/api/swagger/swagger-dev.json index 58296dcb696..71b66f5fb9c 100644 --- a/api/swagger/swagger-dev.json +++ b/api/swagger/swagger-dev.json @@ -2808,6 +2808,24 @@ } }, "x-order": 13 + }, + "compression": { + "type": "string", + "title": "BackupCompression specifies compression", + "default": "BACKUP_COMPRESSION_UNSPECIFIED", + "enum": [ + "BACKUP_COMPRESSION_UNSPECIFIED", + "BACKUP_COMPRESSION_DEFAULT", + "BACKUP_COMPRESSION_NONE", + "BACKUP_COMPRESSION_QUICKLZ", + "BACKUP_COMPRESSION_ZSTD", + "BACKUP_COMPRESSION_LZ4", + "BACKUP_COMPRESSION_S2", + "BACKUP_COMPRESSION_GZIP", + "BACKUP_COMPRESSION_SNAPPY", + "BACKUP_COMPRESSION_PGZIP" + ], + "x-order": 14 } } }, @@ -3582,6 +3600,24 @@ "type": "string", "format": "date-time", "x-order": 12 + }, + "compression": { + "type": "string", + "title": "BackupCompression specifies compression", + "default": "BACKUP_COMPRESSION_UNSPECIFIED", + "enum": [ + "BACKUP_COMPRESSION_UNSPECIFIED", + "BACKUP_COMPRESSION_DEFAULT", + "BACKUP_COMPRESSION_NONE", + "BACKUP_COMPRESSION_QUICKLZ", + "BACKUP_COMPRESSION_ZSTD", + "BACKUP_COMPRESSION_LZ4", + "BACKUP_COMPRESSION_S2", + "BACKUP_COMPRESSION_GZIP", + "BACKUP_COMPRESSION_SNAPPY", + "BACKUP_COMPRESSION_PGZIP" + ], + "x-order": 13 } } }, @@ -3934,6 +3970,24 @@ "type": "integer", "format": "int64", "x-order": 18 + }, + "compression": { + "type": "string", + "title": "BackupCompression specifies compression", + "default": "BACKUP_COMPRESSION_UNSPECIFIED", + "enum": [ + "BACKUP_COMPRESSION_UNSPECIFIED", + "BACKUP_COMPRESSION_DEFAULT", + "BACKUP_COMPRESSION_NONE", + "BACKUP_COMPRESSION_QUICKLZ", + "BACKUP_COMPRESSION_ZSTD", + "BACKUP_COMPRESSION_LZ4", + "BACKUP_COMPRESSION_S2", + "BACKUP_COMPRESSION_GZIP", + "BACKUP_COMPRESSION_SNAPPY", + "BACKUP_COMPRESSION_PGZIP" + ], + "x-order": 19 } } }, @@ -3976,6 +4030,88 @@ } } }, + "/v1/backups/services/{service_id}/compression": { + "get": { + "description": "Return a list of available compression methods for a service.", + "tags": [ + "BackupService" + ], + "summary": "List Service Compression", + "operationId": "ListServiceCompression", + "parameters": [ + { + "type": "string", + "description": "Service identifier.", + "name": "service_id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "type": "object", + "properties": { + "compression_methods": { + "description": "Available compression methods for the service.", + "type": "array", + "items": { + "type": "string", + "title": "BackupCompression specifies compression", + "default": "BACKUP_COMPRESSION_UNSPECIFIED", + "enum": [ + "BACKUP_COMPRESSION_UNSPECIFIED", + "BACKUP_COMPRESSION_DEFAULT", + "BACKUP_COMPRESSION_NONE", + "BACKUP_COMPRESSION_QUICKLZ", + "BACKUP_COMPRESSION_ZSTD", + "BACKUP_COMPRESSION_LZ4", + "BACKUP_COMPRESSION_S2", + "BACKUP_COMPRESSION_GZIP", + "BACKUP_COMPRESSION_SNAPPY", + "BACKUP_COMPRESSION_PGZIP" + ] + }, + "x-order": 0 + } + } + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32", + "x-order": 0 + }, + "message": { + "type": "string", + "x-order": 1 + }, + "details": { + "type": "array", + "items": { + "type": "object", + "properties": { + "@type": { + "type": "string", + "x-order": 0 + } + }, + 
"additionalProperties": {} + }, + "x-order": 2 + } + } + } + } + } + } + }, "/v1/backups/{artifact_id}/compatible-services": { "get": { "description": "List services that are compatible with the backup artifact.", @@ -4541,6 +4677,24 @@ "type": "integer", "format": "int64", "x-order": 12 + }, + "compression": { + "type": "string", + "title": "BackupCompression specifies compression", + "default": "BACKUP_COMPRESSION_UNSPECIFIED", + "enum": [ + "BACKUP_COMPRESSION_UNSPECIFIED", + "BACKUP_COMPRESSION_DEFAULT", + "BACKUP_COMPRESSION_NONE", + "BACKUP_COMPRESSION_QUICKLZ", + "BACKUP_COMPRESSION_ZSTD", + "BACKUP_COMPRESSION_LZ4", + "BACKUP_COMPRESSION_S2", + "BACKUP_COMPRESSION_GZIP", + "BACKUP_COMPRESSION_SNAPPY", + "BACKUP_COMPRESSION_PGZIP" + ], + "x-order": 13 } } } @@ -4655,6 +4809,24 @@ "description": "Folder on storage for artifact.", "type": "string", "x-order": 7 + }, + "compression": { + "type": "string", + "title": "BackupCompression specifies compression", + "default": "BACKUP_COMPRESSION_UNSPECIFIED", + "enum": [ + "BACKUP_COMPRESSION_UNSPECIFIED", + "BACKUP_COMPRESSION_DEFAULT", + "BACKUP_COMPRESSION_NONE", + "BACKUP_COMPRESSION_QUICKLZ", + "BACKUP_COMPRESSION_ZSTD", + "BACKUP_COMPRESSION_LZ4", + "BACKUP_COMPRESSION_S2", + "BACKUP_COMPRESSION_GZIP", + "BACKUP_COMPRESSION_SNAPPY", + "BACKUP_COMPRESSION_PGZIP" + ], + "x-order": 8 } } } diff --git a/api/swagger/swagger.json b/api/swagger/swagger.json index 76c0c334412..541ebaafca3 100644 --- a/api/swagger/swagger.json +++ b/api/swagger/swagger.json @@ -2291,6 +2291,24 @@ } }, "x-order": 13 + }, + "compression": { + "type": "string", + "title": "BackupCompression specifies compression", + "default": "BACKUP_COMPRESSION_UNSPECIFIED", + "enum": [ + "BACKUP_COMPRESSION_UNSPECIFIED", + "BACKUP_COMPRESSION_DEFAULT", + "BACKUP_COMPRESSION_NONE", + "BACKUP_COMPRESSION_QUICKLZ", + "BACKUP_COMPRESSION_ZSTD", + "BACKUP_COMPRESSION_LZ4", + "BACKUP_COMPRESSION_S2", + "BACKUP_COMPRESSION_GZIP", + "BACKUP_COMPRESSION_SNAPPY", + "BACKUP_COMPRESSION_PGZIP" + ], + "x-order": 14 } } }, @@ -3065,6 +3083,24 @@ "type": "string", "format": "date-time", "x-order": 12 + }, + "compression": { + "type": "string", + "title": "BackupCompression specifies compression", + "default": "BACKUP_COMPRESSION_UNSPECIFIED", + "enum": [ + "BACKUP_COMPRESSION_UNSPECIFIED", + "BACKUP_COMPRESSION_DEFAULT", + "BACKUP_COMPRESSION_NONE", + "BACKUP_COMPRESSION_QUICKLZ", + "BACKUP_COMPRESSION_ZSTD", + "BACKUP_COMPRESSION_LZ4", + "BACKUP_COMPRESSION_S2", + "BACKUP_COMPRESSION_GZIP", + "BACKUP_COMPRESSION_SNAPPY", + "BACKUP_COMPRESSION_PGZIP" + ], + "x-order": 13 } } }, @@ -3417,6 +3453,24 @@ "type": "integer", "format": "int64", "x-order": 18 + }, + "compression": { + "type": "string", + "title": "BackupCompression specifies compression", + "default": "BACKUP_COMPRESSION_UNSPECIFIED", + "enum": [ + "BACKUP_COMPRESSION_UNSPECIFIED", + "BACKUP_COMPRESSION_DEFAULT", + "BACKUP_COMPRESSION_NONE", + "BACKUP_COMPRESSION_QUICKLZ", + "BACKUP_COMPRESSION_ZSTD", + "BACKUP_COMPRESSION_LZ4", + "BACKUP_COMPRESSION_S2", + "BACKUP_COMPRESSION_GZIP", + "BACKUP_COMPRESSION_SNAPPY", + "BACKUP_COMPRESSION_PGZIP" + ], + "x-order": 19 } } }, @@ -3459,6 +3513,88 @@ } } }, + "/v1/backups/services/{service_id}/compression": { + "get": { + "description": "Return a list of available compression methods for a service.", + "tags": [ + "BackupService" + ], + "summary": "List Service Compression", + "operationId": "ListServiceCompression", + "parameters": [ + { + "type": "string", + "description": "Service 
identifier.", + "name": "service_id", + "in": "path", + "required": true + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "type": "object", + "properties": { + "compression_methods": { + "description": "Available compression methods for the service.", + "type": "array", + "items": { + "type": "string", + "title": "BackupCompression specifies compression", + "default": "BACKUP_COMPRESSION_UNSPECIFIED", + "enum": [ + "BACKUP_COMPRESSION_UNSPECIFIED", + "BACKUP_COMPRESSION_DEFAULT", + "BACKUP_COMPRESSION_NONE", + "BACKUP_COMPRESSION_QUICKLZ", + "BACKUP_COMPRESSION_ZSTD", + "BACKUP_COMPRESSION_LZ4", + "BACKUP_COMPRESSION_S2", + "BACKUP_COMPRESSION_GZIP", + "BACKUP_COMPRESSION_SNAPPY", + "BACKUP_COMPRESSION_PGZIP" + ] + }, + "x-order": 0 + } + } + } + }, + "default": { + "description": "An unexpected error response.", + "schema": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32", + "x-order": 0 + }, + "message": { + "type": "string", + "x-order": 1 + }, + "details": { + "type": "array", + "items": { + "type": "object", + "properties": { + "@type": { + "type": "string", + "x-order": 0 + } + }, + "additionalProperties": {} + }, + "x-order": 2 + } + } + } + } + } + } + }, "/v1/backups/{artifact_id}/compatible-services": { "get": { "description": "List services that are compatible with the backup artifact.", @@ -4024,6 +4160,24 @@ "type": "integer", "format": "int64", "x-order": 12 + }, + "compression": { + "type": "string", + "title": "BackupCompression specifies compression", + "default": "BACKUP_COMPRESSION_UNSPECIFIED", + "enum": [ + "BACKUP_COMPRESSION_UNSPECIFIED", + "BACKUP_COMPRESSION_DEFAULT", + "BACKUP_COMPRESSION_NONE", + "BACKUP_COMPRESSION_QUICKLZ", + "BACKUP_COMPRESSION_ZSTD", + "BACKUP_COMPRESSION_LZ4", + "BACKUP_COMPRESSION_S2", + "BACKUP_COMPRESSION_GZIP", + "BACKUP_COMPRESSION_SNAPPY", + "BACKUP_COMPRESSION_PGZIP" + ], + "x-order": 13 } } } @@ -4138,6 +4292,24 @@ "description": "Folder on storage for artifact.", "type": "string", "x-order": 7 + }, + "compression": { + "type": "string", + "title": "BackupCompression specifies compression", + "default": "BACKUP_COMPRESSION_UNSPECIFIED", + "enum": [ + "BACKUP_COMPRESSION_UNSPECIFIED", + "BACKUP_COMPRESSION_DEFAULT", + "BACKUP_COMPRESSION_NONE", + "BACKUP_COMPRESSION_QUICKLZ", + "BACKUP_COMPRESSION_ZSTD", + "BACKUP_COMPRESSION_LZ4", + "BACKUP_COMPRESSION_S2", + "BACKUP_COMPRESSION_GZIP", + "BACKUP_COMPRESSION_SNAPPY", + "BACKUP_COMPRESSION_PGZIP" + ], + "x-order": 8 } } } diff --git a/docs/api/backups/start-backup.md b/docs/api/backups/start-backup.md index 965483a7eb1..7c1a6f497f0 100644 --- a/docs/api/backups/start-backup.md +++ b/docs/api/backups/start-backup.md @@ -25,7 +25,8 @@ curl --insecure -X POST \ "name": "Test Backup", "description": "Test Backup", "retry_interval": "60s", - "retries": 1 + "retries": 1, + "compression": "BACKUP_COMPRESSION_ZSTD" } ' ``` @@ -36,9 +37,29 @@ Also, you require the [service_id](ref:listservices) and [location_id](ref:listl You can defined a `name` and a `description` for each backup. You can also configure `retry_interval` and `retries` if required. +### Compression Options + +The `compression` field allows you to specify the compression algorithm for the backup. 
Available options are: + +- `BACKUP_COMPRESSION_DEFAULT` - The default compression of the service's backup tool +- `BACKUP_COMPRESSION_NONE` - No compression +- `BACKUP_COMPRESSION_QUICKLZ` - QuickLZ compression +- `BACKUP_COMPRESSION_ZSTD` - Zstandard compression +- `BACKUP_COMPRESSION_LZ4` - LZ4 compression +- `BACKUP_COMPRESSION_S2` - S2 compression +- `BACKUP_COMPRESSION_GZIP` - Gzip compression +- `BACKUP_COMPRESSION_SNAPPY` - Snappy compression +- `BACKUP_COMPRESSION_PGZIP` - Parallel Gzip compression + +**Database-specific support:** + +- **MySQL**: QUICKLZ, ZSTD, LZ4, NONE +- **MongoDB**: GZIP, SNAPPY, LZ4, S2, PGZIP, ZSTD, NONE + ### Error messages The API call could return an error message in the details, containing a specific ErrorCode indicating the failure reason: - ERROR_CODE_XTRABACKUP_NOT_INSTALLED - xtrabackup is not installed on the service - ERROR_CODE_INVALID_XTRABACKUP - different versions of xtrabackup and xbcloud - ERROR_CODE_INCOMPATIBLE_XTRABACKUP - xtrabackup is not compatible with MySQL to make a backup +- ERROR_CODE_INVALID_COMPRESSION - invalid or unsupported compression type for the database diff --git a/managed/models/artifact_helpers.go b/managed/models/artifact_helpers.go index 1cf76d1dd60..f51b74c8605 100644 --- a/managed/models/artifact_helpers.go +++ b/managed/models/artifact_helpers.go @@ -183,6 +183,7 @@ type CreateArtifactParams struct { DataModel DataModel Mode BackupMode Status BackupStatus + Compression BackupCompression ScheduleID string IsShardedCluster bool Folder string @@ -203,6 +204,10 @@ func (p *CreateArtifactParams) Validate() error { return NewInvalidArgumentError("service_id shouldn't be empty") } + if err := p.Compression.Validate(); err != nil { + return err + } + if err := p.Mode.Validate(); err != nil { return err } @@ -248,6 +253,7 @@ func CreateArtifact(q *reform.Querier, params CreateArtifactParams) (*Artifact, ScheduleID: params.ScheduleID, IsShardedCluster: params.IsShardedCluster, Folder: params.Folder, + Compression: params.Compression, } if params.ScheduleID != "" { diff --git a/managed/models/artifact_helpers_test.go b/managed/models/artifact_helpers_test.go index 87605dd22a4..5c3d1b4eda3 100644 --- a/managed/models/artifact_helpers_test.go +++ b/managed/models/artifact_helpers_test.go @@ -98,14 +98,15 @@ func TestArtifacts(t *testing.T) { prepareLocationsAndService(q) createParams := models.CreateArtifactParams{ - Name: "backup_name", - Vendor: "MySQL", - LocationID: locationID1, - ServiceID: serviceID1, - DataModel: models.PhysicalDataModel, - Status: models.PendingBackupStatus, - Mode: models.Snapshot, - Folder: "artifact_folder", + Name: "backup_name", + Vendor: "MySQL", + LocationID: locationID1, + ServiceID: serviceID1, + DataModel: models.PhysicalDataModel, + Status: models.PendingBackupStatus, + Mode: models.Snapshot, + Folder: "artifact_folder", + Compression: models.Default, } a, err := models.CreateArtifact(q, createParams) @@ -148,33 +149,36 @@ func TestArtifacts(t *testing.T) { prepareLocationsAndService(q) params1 := models.CreateArtifactParams{ - Name: "backup_name_1", - Vendor: "MySQL", - LocationID: locationID1, - ServiceID: serviceID1, - DataModel: models.PhysicalDataModel, - Status: models.PendingBackupStatus, - Mode: models.Snapshot, + Name: "backup_name_1", + Vendor: "MySQL", + LocationID: locationID1, + ServiceID: serviceID1, + DataModel: models.PhysicalDataModel, + Status: models.PendingBackupStatus, + Mode: models.Snapshot, + Compression: models.Default, } params2 := models.CreateArtifactParams{ - Name:
"backup_name_2", - Vendor: "PostgreSQL", - LocationID: locationID2, - ServiceID: serviceID2, - DataModel: models.LogicalDataModel, - Status: models.PausedBackupStatus, - Mode: models.Snapshot, + Name: "backup_name_2", + Vendor: "PostgreSQL", + LocationID: locationID2, + ServiceID: serviceID2, + DataModel: models.LogicalDataModel, + Status: models.PausedBackupStatus, + Mode: models.Snapshot, + Compression: models.Default, } params3 := models.CreateArtifactParams{ - Name: "backup_name_3", - Vendor: "mongodb", - LocationID: locationID2, - ServiceID: serviceID2, - DataModel: models.LogicalDataModel, - Status: models.SuccessBackupStatus, - Mode: models.Snapshot, - Folder: "some_folder", + Name: "backup_name_3", + Vendor: "mongodb", + LocationID: locationID2, + ServiceID: serviceID2, + DataModel: models.LogicalDataModel, + Status: models.SuccessBackupStatus, + Mode: models.Snapshot, + Folder: "some_folder", + Compression: models.Default, } a1, err := models.CreateArtifact(q, params1) @@ -226,13 +230,14 @@ func TestArtifacts(t *testing.T) { prepareLocationsAndService(q) params := models.CreateArtifactParams{ - Name: "backup_name", - Vendor: "MySQL", - LocationID: locationID1, - ServiceID: serviceID1, - DataModel: models.PhysicalDataModel, - Status: models.PendingBackupStatus, - Mode: models.Snapshot, + Name: "backup_name", + Vendor: "MySQL", + LocationID: locationID1, + ServiceID: serviceID1, + DataModel: models.PhysicalDataModel, + Status: models.PendingBackupStatus, + Mode: models.Snapshot, + Compression: models.Default, } b, err := models.CreateArtifact(q, params) @@ -257,13 +262,14 @@ func TestArtifacts(t *testing.T) { prepareLocationsAndService(q) params := models.CreateArtifactParams{ - Name: "backup_name", - Vendor: "MongoDB", - LocationID: locationID1, - ServiceID: serviceID1, - DataModel: models.LogicalDataModel, - Status: models.SuccessBackupStatus, - Mode: models.PITR, + Name: "backup_name", + Vendor: "MongoDB", + LocationID: locationID1, + ServiceID: serviceID1, + DataModel: models.LogicalDataModel, + Status: models.SuccessBackupStatus, + Mode: models.PITR, + Compression: models.Default, } a, err := models.CreateArtifact(q, params) @@ -312,113 +318,122 @@ func TestArtifactValidation(t *testing.T) { { name: "name missing", params: models.CreateArtifactParams{ - Vendor: "MySQL", - LocationID: "location_id", - ServiceID: "service_id", - DataModel: models.PhysicalDataModel, - Status: models.PendingBackupStatus, - Mode: models.Snapshot, + Vendor: "MySQL", + LocationID: "location_id", + ServiceID: "service_id", + DataModel: models.PhysicalDataModel, + Status: models.PendingBackupStatus, + Mode: models.Snapshot, + Compression: models.Default, }, errorMsg: "invalid argument: name shouldn't be empty", }, { name: "vendor missing", params: models.CreateArtifactParams{ - Name: "backup_name", - LocationID: "location_id", - ServiceID: "service_id", - DataModel: models.PhysicalDataModel, - Status: models.PendingBackupStatus, - Mode: models.Snapshot, + Name: "backup_name", + LocationID: "location_id", + ServiceID: "service_id", + DataModel: models.PhysicalDataModel, + Status: models.PendingBackupStatus, + Mode: models.Snapshot, + Compression: models.Default, }, errorMsg: "invalid argument: vendor shouldn't be empty", }, { name: "location missing", params: models.CreateArtifactParams{ - Name: "backup_name", - Vendor: "MySQL", - ServiceID: "service_id", - DataModel: models.PhysicalDataModel, - Status: models.PendingBackupStatus, - Mode: models.Snapshot, + Name: "backup_name", + Vendor: "MySQL", + 
ServiceID: "service_id", + DataModel: models.PhysicalDataModel, + Status: models.PendingBackupStatus, + Mode: models.Snapshot, + Compression: models.Default, }, errorMsg: "invalid argument: location_id shouldn't be empty", }, { name: "service missing", params: models.CreateArtifactParams{ - Name: "backup_name", - Vendor: "MySQL", - LocationID: "location_id", - DataModel: models.PhysicalDataModel, - Status: models.PendingBackupStatus, - Mode: models.Snapshot, + Name: "backup_name", + Vendor: "MySQL", + LocationID: "location_id", + DataModel: models.PhysicalDataModel, + Status: models.PendingBackupStatus, + Mode: models.Snapshot, + Compression: models.Default, }, errorMsg: "invalid argument: service_id shouldn't be empty", }, { name: "empty backup mode", params: models.CreateArtifactParams{ - Name: "backup_name", - Vendor: "MySQL", - LocationID: "location_id", - ServiceID: "service_id", - Mode: "", - DataModel: models.PhysicalDataModel, - Status: models.PendingBackupStatus, + Name: "backup_name", + Vendor: "MySQL", + LocationID: "location_id", + ServiceID: "service_id", + Mode: "", + DataModel: models.PhysicalDataModel, + Status: models.PendingBackupStatus, + Compression: models.Default, }, errorMsg: "invalid argument: empty backup mode", }, { name: "empty data model", params: models.CreateArtifactParams{ - Name: "backup_name", - Vendor: "MySQL", - LocationID: "location_id", - ServiceID: "service_id", - DataModel: "", - Status: models.PendingBackupStatus, - Mode: models.Snapshot, + Name: "backup_name", + Vendor: "MySQL", + LocationID: "location_id", + ServiceID: "service_id", + DataModel: "", + Status: models.PendingBackupStatus, + Mode: models.Snapshot, + Compression: models.Default, }, errorMsg: "invalid argument: empty data model", }, { name: "invalid data model", params: models.CreateArtifactParams{ - Name: "backup_name", - Vendor: "MySQL", - LocationID: "location_id", - ServiceID: "service_id", - DataModel: models.DataModel("invalid"), - Status: models.PendingBackupStatus, - Mode: models.Snapshot, + Name: "backup_name", + Vendor: "MySQL", + LocationID: "location_id", + ServiceID: "service_id", + DataModel: models.DataModel("invalid"), + Status: models.PendingBackupStatus, + Mode: models.Snapshot, + Compression: models.Default, }, errorMsg: "invalid argument: invalid data model 'invalid'", }, { name: "invalid status", params: models.CreateArtifactParams{ - Name: "backup_name", - Vendor: "MySQL", - LocationID: "location_id", - ServiceID: "service_id", - DataModel: models.PhysicalDataModel, - Status: models.BackupStatus("invalid"), - Mode: models.Snapshot, + Name: "backup_name", + Vendor: "MySQL", + LocationID: "location_id", + ServiceID: "service_id", + DataModel: models.PhysicalDataModel, + Status: models.BackupStatus("invalid"), + Mode: models.Snapshot, + Compression: models.Default, }, errorMsg: "invalid argument: invalid status 'invalid'", }, { name: "invalid mode", params: models.CreateArtifactParams{ - Name: "backup_name", - Vendor: "MySQL", - LocationID: "location_id", - ServiceID: "service_id", - DataModel: models.PhysicalDataModel, - Status: models.PendingBackupStatus, - Mode: models.BackupMode("invalid"), + Name: "backup_name", + Vendor: "MySQL", + LocationID: "location_id", + ServiceID: "service_id", + DataModel: models.PhysicalDataModel, + Status: models.PendingBackupStatus, + Mode: models.BackupMode("invalid"), + Compression: models.Default, }, errorMsg: "invalid argument: invalid backup mode 'invalid'", }, diff --git a/managed/models/artifact_model.go 
b/managed/models/artifact_model.go index 88b4f62e0e6..f262618ce9b 100644 --- a/managed/models/artifact_model.go +++ b/managed/models/artifact_model.go @@ -17,6 +17,7 @@ package models import ( "database/sql/driver" + "slices" "time" "gopkg.in/reform.v1" @@ -118,6 +119,90 @@ func (m BackupMode) Validate() error { return nil } +// BackupCompression represents compression algorithm used for backup. +type BackupCompression string + +// BackupCompression types. +const ( + Default BackupCompression = "default" + None BackupCompression = "none" + QuickLZ BackupCompression = "quicklz" + ZSTD BackupCompression = "zstd" + LZ4 BackupCompression = "lz4" + S2 BackupCompression = "s2" + GZIP BackupCompression = "gzip" + Snappy BackupCompression = "snappy" + PGZIP BackupCompression = "pgzip" +) + +// compressionSupport defines which compression methods are supported by each service type +var compressionSupport = map[ServiceType][]BackupCompression{ + MySQLServiceType: { + Default, + QuickLZ, + ZSTD, + LZ4, + None, + }, + MongoDBServiceType: { + Default, + GZIP, + Snappy, + LZ4, + S2, + PGZIP, + ZSTD, + None, + }, +} + +// GetSupportedCompressions returns the list of compression methods supported by a service type +func GetSupportedCompressions(serviceType ServiceType) []BackupCompression { + if compressions, exists := compressionSupport[serviceType]; exists { + return compressions + } + return nil +} + +// Validate validates compression. +func (c BackupCompression) Validate() error { + switch c { + case Default: + case QuickLZ: + case LZ4: + case ZSTD: + case S2: + case GZIP: + case Snappy: + case PGZIP: + case None: + case "": + return NewInvalidArgumentError("empty compression") + default: + return NewInvalidArgumentError("invalid compression '%s'", c) + } + + return nil +} + +// ValidateForServiceType validates compression for a specific service type. +func (c BackupCompression) ValidateForServiceType(serviceType ServiceType) error { + if err := c.Validate(); err != nil { + return err + } + + supported := GetSupportedCompressions(serviceType) + if supported == nil { + return NewInvalidArgumentError("compression is not yet supported for service type '%s'", serviceType) + } + + if slices.Contains(supported, c) { + return nil + } + + return NewInvalidArgumentError("compression '%s' is not supported for service type '%s'", c, serviceType) +} + // File represents file or directory. 
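To make the service-type gating above concrete, here is a small usage sketch (editorial, not part of the diff; the import path is assumed from the repository layout, and `models.PostgreSQLServiceType` is taken from its use later in this change):

```go
package main

import (
	"fmt"

	"github.com/percona/pmm/managed/models"
)

func main() {
	// Snappy is listed only under MongoDBServiceType in compressionSupport.
	fmt.Println(models.Snappy.ValidateForServiceType(models.MongoDBServiceType)) // <nil>
	fmt.Println(models.Snappy.ValidateForServiceType(models.MySQLServiceType))   // "... is not supported for service type ..."

	// Service types without an entry in the map are rejected outright.
	fmt.Println(models.ZSTD.ValidateForServiceType(models.PostgreSQLServiceType)) // "... is not yet supported ..."
}
```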
type File struct { Name string `json:"name"` @@ -155,22 +240,23 @@ func (p *MetadataList) Scan(src interface{}) error { return jsonScan(p, src) } // //reform:artifacts type Artifact struct { - ID string `reform:"id,pk"` - Name string `reform:"name"` - Vendor string `reform:"vendor"` - DBVersion string `reform:"db_version"` - LocationID string `reform:"location_id"` - ServiceID string `reform:"service_id"` - DataModel DataModel `reform:"data_model"` - Mode BackupMode `reform:"mode"` - Status BackupStatus `reform:"status"` - Type ArtifactType `reform:"type"` - ScheduleID string `reform:"schedule_id"` - CreatedAt time.Time `reform:"created_at"` - UpdatedAt time.Time `reform:"updated_at"` - IsShardedCluster bool `reform:"is_sharded_cluster"` - Folder string `reform:"folder"` - MetadataList MetadataList `reform:"metadata_list"` + ID string `reform:"id,pk"` + Name string `reform:"name"` + Vendor string `reform:"vendor"` + DBVersion string `reform:"db_version"` + LocationID string `reform:"location_id"` + ServiceID string `reform:"service_id"` + DataModel DataModel `reform:"data_model"` + Mode BackupMode `reform:"mode"` + Status BackupStatus `reform:"status"` + Type ArtifactType `reform:"type"` + Compression BackupCompression `reform:"compression"` + ScheduleID string `reform:"schedule_id"` + CreatedAt time.Time `reform:"created_at"` + UpdatedAt time.Time `reform:"updated_at"` + IsShardedCluster bool `reform:"is_sharded_cluster"` + Folder string `reform:"folder"` + MetadataList MetadataList `reform:"metadata_list"` } // BeforeInsert implements reform.BeforeInserter interface. diff --git a/managed/models/artifact_model_reform.go b/managed/models/artifact_model_reform.go index 6bd816c51b6..6f5bf08a2bb 100644 --- a/managed/models/artifact_model_reform.go +++ b/managed/models/artifact_model_reform.go @@ -38,6 +38,7 @@ func (v *artifactTableType) Columns() []string { "mode", "status", "type", + "compression", "schedule_id", "created_at", "updated_at", @@ -78,6 +79,7 @@ var ArtifactTable = &artifactTableType{ {Name: "Mode", Type: "BackupMode", Column: "mode"}, {Name: "Status", Type: "BackupStatus", Column: "status"}, {Name: "Type", Type: "ArtifactType", Column: "type"}, + {Name: "Compression", Type: "BackupCompression", Column: "compression"}, {Name: "ScheduleID", Type: "string", Column: "schedule_id"}, {Name: "CreatedAt", Type: "time.Time", Column: "created_at"}, {Name: "UpdatedAt", Type: "time.Time", Column: "updated_at"}, @@ -92,7 +94,7 @@ var ArtifactTable = &artifactTableType{ // String returns a string representation of this struct or record. 
func (s Artifact) String() string { - res := make([]string, 16) + res := make([]string, 17) res[0] = "ID: " + reform.Inspect(s.ID, true) res[1] = "Name: " + reform.Inspect(s.Name, true) res[2] = "Vendor: " + reform.Inspect(s.Vendor, true) @@ -103,12 +105,13 @@ func (s Artifact) String() string { res[7] = "Mode: " + reform.Inspect(s.Mode, true) res[8] = "Status: " + reform.Inspect(s.Status, true) res[9] = "Type: " + reform.Inspect(s.Type, true) - res[10] = "ScheduleID: " + reform.Inspect(s.ScheduleID, true) - res[11] = "CreatedAt: " + reform.Inspect(s.CreatedAt, true) - res[12] = "UpdatedAt: " + reform.Inspect(s.UpdatedAt, true) - res[13] = "IsShardedCluster: " + reform.Inspect(s.IsShardedCluster, true) - res[14] = "Folder: " + reform.Inspect(s.Folder, true) - res[15] = "MetadataList: " + reform.Inspect(s.MetadataList, true) + res[10] = "Compression: " + reform.Inspect(s.Compression, true) + res[11] = "ScheduleID: " + reform.Inspect(s.ScheduleID, true) + res[12] = "CreatedAt: " + reform.Inspect(s.CreatedAt, true) + res[13] = "UpdatedAt: " + reform.Inspect(s.UpdatedAt, true) + res[14] = "IsShardedCluster: " + reform.Inspect(s.IsShardedCluster, true) + res[15] = "Folder: " + reform.Inspect(s.Folder, true) + res[16] = "MetadataList: " + reform.Inspect(s.MetadataList, true) return strings.Join(res, ", ") } @@ -126,6 +129,7 @@ func (s *Artifact) Values() []interface{} { s.Mode, s.Status, s.Type, + s.Compression, s.ScheduleID, s.CreatedAt, s.UpdatedAt, @@ -149,6 +153,7 @@ func (s *Artifact) Pointers() []interface{} { &s.Mode, &s.Status, &s.Type, + &s.Compression, &s.ScheduleID, &s.CreatedAt, &s.UpdatedAt, diff --git a/managed/models/database.go b/managed/models/database.go index 85431e42682..db3dbec7b77 100644 --- a/managed/models/database.go +++ b/managed/models/database.go @@ -1148,6 +1148,12 @@ var databaseSchema = [][]string{ 112: { `UPDATE agents SET disabled = true WHERE agent_type = 'qan-postgresql-pgstatmonitor-agent' AND pmm_agent_id = 'pmm-server'`, }, + 113: { + `ALTER TABLE artifacts + ADD COLUMN compression VARCHAR NOT NULL DEFAULT 'default'`, + + `ALTER TABLE artifacts ALTER COLUMN compression DROP DEFAULT`, + }, } // ^^^ Avoid default values in schema definition. ^^^ diff --git a/managed/models/location_helpers_test.go b/managed/models/location_helpers_test.go index 83edaa0e54f..68a03215e58 100644 --- a/managed/models/location_helpers_test.go +++ b/managed/models/location_helpers_test.go @@ -311,13 +311,14 @@ func TestBackupLocations(t *testing.T) { require.NoError(t, q.Insert(s)) artifact, err := models.CreateArtifact(q, models.CreateArtifactParams{ - Name: "artifact", - Vendor: "mysql", - LocationID: loc.ID, - ServiceID: serviceID1, - DataModel: models.PhysicalDataModel, - Mode: models.Snapshot, - Status: models.SuccessBackupStatus, + Name: "artifact", + Vendor: "mysql", + LocationID: loc.ID, + ServiceID: serviceID1, + DataModel: models.PhysicalDataModel, + Mode: models.Snapshot, + Status: models.SuccessBackupStatus, + Compression: models.Default, }) require.NoError(t, err) diff --git a/managed/models/scheduled_task_models.go b/managed/models/scheduled_task_models.go index b4419531f10..858d7fb18fa 100644 --- a/managed/models/scheduled_task_models.go +++ b/managed/models/scheduled_task_models.go @@ -59,17 +59,18 @@ type ScheduledTaskData struct { // CommonBackupTaskData contains common data for all backup tasks. 
type CommonBackupTaskData struct { - ServiceID string `json:"service_id"` - ClusterName string `json:"cluster_name"` - LocationID string `json:"location_id"` - Name string `json:"name"` - Description string `json:"description"` - Retention uint32 `json:"retention"` - DataModel DataModel `json:"data_model"` - Mode BackupMode `json:"mode"` - Retries uint32 `json:"retries"` - RetryInterval time.Duration `json:"retry_interval"` - Folder string `json:"folder"` + ServiceID string `json:"service_id"` + ClusterName string `json:"cluster_name"` + LocationID string `json:"location_id"` + Name string `json:"name"` + Description string `json:"description"` + Retention uint32 `json:"retention"` + DataModel DataModel `json:"data_model"` + Mode BackupMode `json:"mode"` + Retries uint32 `json:"retries"` + RetryInterval time.Duration `json:"retry_interval"` + Folder string `json:"folder"` + Compression BackupCompression `json:"compression"` } // MySQLBackupTaskData contains data for mysql backup task. diff --git a/managed/models/scheduled_tasks_helpers.go b/managed/models/scheduled_tasks_helpers.go index 4293ac01572..7f925eef96b 100644 --- a/managed/models/scheduled_tasks_helpers.go +++ b/managed/models/scheduled_tasks_helpers.go @@ -54,6 +54,7 @@ type ScheduledTasksFilter struct { ClusterName string LocationID string Mode BackupMode + Compression BackupCompression Name string Folder *string } @@ -107,6 +108,12 @@ func FindScheduledTasks(q *reform.Querier, filters ScheduledTasksFilter) ([]*Sch args = append(args, filters.Mode) idx++ } + if filters.Compression != "" { + crossJoin = true + andConds = append(andConds, "value ->> 'compression' = "+q.Placeholder(idx)) + args = append(args, filters.Compression) + idx++ + } if filters.Name != "" { crossJoin = true andConds = append(andConds, "value ->> 'name' = "+q.Placeholder(idx)) @@ -365,6 +372,15 @@ func (s *ScheduledTask) Mode() (BackupMode, error) { return data.Mode, nil } +// Compression returns backup compression. +func (s *ScheduledTask) Compression() (BackupCompression, error) { + data, err := s.CommonBackupData() + if err != nil { + return "", err + } + return data.Compression, nil +} + // LocationID returns task location. 
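Because scheduled-task data lives in a JSON column, the new filter matches the `compression` key with Postgres's `->>` operator, and `Compression()` decodes the same field from `CommonBackupTaskData`. A hedged sketch of both ends (querier construction is environment-specific and omitted; the import path is assumed):

```go
package main

import (
	"gopkg.in/reform.v1"

	"github.com/percona/pmm/managed/models"
)

// zstdTaskCompressions lists the compression setting of every scheduled task
// whose backup data uses zstd, exercising the filter and the accessor above.
func zstdTaskCompressions(q *reform.Querier) ([]models.BackupCompression, error) {
	tasks, err := models.FindScheduledTasks(q, models.ScheduledTasksFilter{
		Compression: models.ZSTD, // compiled into: value ->> 'compression' = $n
	})
	if err != nil {
		return nil, err
	}

	out := make([]models.BackupCompression, 0, len(tasks))
	for _, task := range tasks {
		c, err := task.Compression() // reads CommonBackupTaskData.Compression
		if err != nil {
			return nil, err
		}
		out = append(out, c)
	}
	return out, nil
}

func main() {}
```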
func (s *ScheduledTask) LocationID() (string, error) { data, err := s.CommonBackupData() diff --git a/managed/services/agents/jobs.go b/managed/services/agents/jobs.go index 901435d872a..2b5704969fc 100644 --- a/managed/services/agents/jobs.go +++ b/managed/services/agents/jobs.go @@ -150,7 +150,7 @@ func (s *JobsService) RestartJob(ctx context.Context, jobID string) error { switch job.Type { case models.MySQLBackupJob: - if err := s.StartMySQLBackupJob(job.ID, job.PMMAgentID, job.Timeout, artifact.Name, dbConfig, locationConfig, artifact.Folder); err != nil { + if err := s.StartMySQLBackupJob(job.ID, job.PMMAgentID, job.Timeout, artifact.Name, dbConfig, locationConfig, artifact.Folder, artifact.Compression); err != nil { return errors.WithStack(err) } case models.MongoDBBackupJob: @@ -160,7 +160,7 @@ func (s *JobsService) RestartJob(ctx context.Context, jobID string) error { } if err := s.StartMongoDBBackupJob(service, job.ID, job.PMMAgentID, job.Timeout, artifact.Name, - job.Data.MongoDBBackup.Mode, job.Data.MongoDBBackup.DataModel, locationConfig, artifact.Folder); err != nil { + job.Data.MongoDBBackup.Mode, job.Data.MongoDBBackup.DataModel, locationConfig, artifact.Folder, artifact.Compression); err != nil { return errors.WithStack(err) } case models.MySQLRestoreBackupJob: @@ -375,7 +375,7 @@ func (s *JobsService) handleJobProgress(_ context.Context, progress *agentv1.Job } // StartMySQLBackupJob starts mysql backup job on the pmm-agent. -func (s *JobsService) StartMySQLBackupJob(jobID, pmmAgentID string, timeout time.Duration, name string, dbConfig *models.DBConfig, locationConfig *models.BackupLocationConfig, folder string) error { //nolint:lll +func (s *JobsService) StartMySQLBackupJob(jobID, pmmAgentID string, timeout time.Duration, name string, dbConfig *models.DBConfig, locationConfig *models.BackupLocationConfig, folder string, compression models.BackupCompression) error { //nolint:lll if err := models.PMMAgentSupported(s.r.db.Querier, pmmAgentID, "mysql backup", pmmAgentMinVersionForMySQLBackupAndRestore); err != nil { return err @@ -391,6 +391,11 @@ func (s *JobsService) StartMySQLBackupJob(jobID, pmmAgentID string, timeout time Folder: folder, } + var err error + if mySQLReq.Compression, err = convertBackupCompression(compression); err != nil { + return err + } + switch { case locationConfig.S3Config != nil: mySQLReq.LocationConfig = &agentv1.StartJobRequest_MySQLBackup_S3Config{ @@ -434,6 +439,7 @@ func (s *JobsService) StartMongoDBBackupJob( dataModel models.DataModel, locationConfig *models.BackupLocationConfig, folder string, + compression models.BackupCompression, ) error { var err error switch dataModel { @@ -472,6 +478,10 @@ func (s *JobsService) StartMongoDBBackupJob( return err } + if mongoDBReq.Compression, err = convertBackupCompression(compression); err != nil { + return err + } + switch { case locationConfig.S3Config != nil: mongoDBReq.LocationConfig = &agentv1.StartJobRequest_MongoDBBackup_S3Config{ @@ -522,6 +532,7 @@ func (s *JobsService) StartMySQLRestoreBackupJob( name string, locationConfig *models.BackupLocationConfig, folder string, + compression models.BackupCompression, ) error { if err := models.PMMAgentSupported(s.r.db.Querier, pmmAgentID, "mysql restore", pmmAgentMinVersionForMySQLBackupAndRestore); err != nil { @@ -532,18 +543,25 @@ func (s *JobsService) StartMySQLRestoreBackupJob( return errors.Errorf("location config is not set") } + mySQLReq := &agentv1.StartJobRequest_MySQLRestoreBackup{ + ServiceId: serviceID, + Name: name, + Folder: folder, + 
LocationConfig: &agentv1.StartJobRequest_MySQLRestoreBackup_S3Config{ + S3Config: convertS3ConfigModel(locationConfig.S3Config), + }, + } + + var err error + if mySQLReq.Compression, err = convertBackupCompression(compression); err != nil { + return err + } + req := &agentv1.StartJobRequest{ JobId: jobID, Timeout: durationpb.New(timeout), Job: &agentv1.StartJobRequest_MysqlRestoreBackup{ - MysqlRestoreBackup: &agentv1.StartJobRequest_MySQLRestoreBackup{ - ServiceId: serviceID, - Name: name, - Folder: folder, - LocationConfig: &agentv1.StartJobRequest_MySQLRestoreBackup_S3Config{ - S3Config: convertS3ConfigModel(locationConfig.S3Config), - }, - }, + MysqlRestoreBackup: mySQLReq, }, } @@ -575,6 +593,7 @@ func (s *JobsService) StartMongoDBRestoreBackupJob( locationConfig *models.BackupLocationConfig, pitrTimestamp time.Time, folder string, + compression models.BackupCompression, ) error { var err error switch dataModel { @@ -620,6 +639,10 @@ func (s *JobsService) StartMongoDBRestoreBackupJob( }, } + if mongoDBReq.Compression, err = convertBackupCompression(compression); err != nil { + return err + } + switch { case locationConfig.S3Config != nil: mongoDBReq.LocationConfig = &agentv1.StartJobRequest_MongoDBRestoreBackup_S3Config{ @@ -790,6 +813,31 @@ func convertDataModel(model models.DataModel) (backuppb.DataModel, error) { } } +func convertBackupCompression(compression models.BackupCompression) (backuppb.BackupCompression, error) { + switch compression { + case models.QuickLZ: + return backuppb.BackupCompression_BACKUP_COMPRESSION_QUICKLZ, nil + case models.ZSTD: + return backuppb.BackupCompression_BACKUP_COMPRESSION_ZSTD, nil + case models.LZ4: + return backuppb.BackupCompression_BACKUP_COMPRESSION_LZ4, nil + case models.S2: + return backuppb.BackupCompression_BACKUP_COMPRESSION_S2, nil + case models.GZIP: + return backuppb.BackupCompression_BACKUP_COMPRESSION_GZIP, nil + case models.Snappy: + return backuppb.BackupCompression_BACKUP_COMPRESSION_SNAPPY, nil + case models.PGZIP: + return backuppb.BackupCompression_BACKUP_COMPRESSION_PGZIP, nil + case models.None: + return backuppb.BackupCompression_BACKUP_COMPRESSION_NONE, nil + case models.Default: + return backuppb.BackupCompression_BACKUP_COMPRESSION_DEFAULT, nil + default: + return 0, errors.Errorf("invalid compression '%s'", compression) + } +} + func createJobLog(querier *reform.Querier, jobID, data string, chunkID int, lastChunk bool) error { _, err := models.CreateJobLog( querier, diff --git a/managed/services/agents/versioner.go b/managed/services/agents/versioner.go index d7f8dc6ed51..4e9110552e8 100644 --- a/managed/services/agents/versioner.go +++ b/managed/services/agents/versioner.go @@ -88,7 +88,7 @@ type Xbcloud struct{} func (*Xbcloud) isSoftware() {} -// Name returns the software name for Qpress. +// Name returns the software name for xbcloud. func (*Xbcloud) Name() models.SoftwareName { return models.XbcloudSoftwareName } // GetVersionRequest constructs a request for Xbcloud software. @@ -145,7 +145,7 @@ func (*PBM) GetVersionRequest() *agentv1.GetVersionsRequest_Software { // getMysqlSoftwareList returns list of software required for MySQL backups. func getMysqlSoftwareList() []Software { - return []Software{&Mysqld{}, &Xtrabackup{}, &Xbcloud{}, &Qpress{}} + return []Software{&Mysqld{}, &Xtrabackup{}, &Xbcloud{}} } // getMongodbSoftwareList returns list of software required for MongoDB backups. 
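
Reviewer note: the `convertBackupCompression` helper added to `managed/services/agents/jobs.go` above is the single choke point where `models.BackupCompression` values cross into the `backuppb` wire enum. A minimal table-driven sketch of how that mapping could be pinned down in a test; the function name, the subset of cases, and the use of testify's `require` are illustrative assumptions, not part of this patch, and the sketch assumes the package's existing `models`/`backuppb` imports:

```go
// Hypothetical test in package agents; not part of this change.
func TestConvertBackupCompressionMapping(t *testing.T) {
	t.Parallel()

	// Spot-check a representative subset: every models value handled by the
	// switch above should map to exactly one backuppb counterpart.
	for in, want := range map[models.BackupCompression]backuppb.BackupCompression{
		models.QuickLZ: backuppb.BackupCompression_BACKUP_COMPRESSION_QUICKLZ,
		models.ZSTD:    backuppb.BackupCompression_BACKUP_COMPRESSION_ZSTD,
		models.Default: backuppb.BackupCompression_BACKUP_COMPRESSION_DEFAULT,
		models.None:    backuppb.BackupCompression_BACKUP_COMPRESSION_NONE,
	} {
		got, err := convertBackupCompression(in)
		require.NoError(t, err)
		require.Equal(t, want, got)
	}

	// Unknown values must surface an error rather than silently map to a default.
	_, err := convertBackupCompression(models.BackupCompression("bogus"))
	require.Error(t, err)
}
```

Keeping the error path explicit matters here: a new compression constant added to `models` without a corresponding case fails loudly at job start instead of sending an unspecified enum to the agent.
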
diff --git a/managed/services/backup/backup_service.go b/managed/services/backup/backup_service.go index 59031558456..6ec823df759 100644 --- a/managed/services/backup/backup_service.go +++ b/managed/services/backup/backup_service.go @@ -66,6 +66,7 @@ type PerformBackupParams struct { Retries uint32 RetryInterval time.Duration Folder string + Compression models.BackupCompression } // PerformBackup starts on-demand backup. @@ -156,16 +157,17 @@ func (s *Service) PerformBackup(ctx context.Context, params PerformBackupParams) if artifact == nil { if artifact, err = models.CreateArtifact(tx.Querier, models.CreateArtifactParams{ - Name: name, - Vendor: string(svc.ServiceType), - DBVersion: dbVersion, - LocationID: locationModel.ID, - ServiceID: svc.ServiceID, - DataModel: params.DataModel, - Mode: params.Mode, - Status: models.PendingBackupStatus, - ScheduleID: params.ScheduleID, - Folder: params.Folder, + Name: name, + Vendor: string(svc.ServiceType), + DBVersion: dbVersion, + LocationID: locationModel.ID, + ServiceID: svc.ServiceID, + DataModel: params.DataModel, + Mode: params.Mode, + Status: models.PendingBackupStatus, + ScheduleID: params.ScheduleID, + Folder: params.Folder, + Compression: params.Compression, }); err != nil { return err } @@ -213,10 +215,10 @@ func (s *Service) PerformBackup(ctx context.Context, params PerformBackupParams) switch svc.ServiceType { case models.MySQLServiceType: - err = s.jobsService.StartMySQLBackupJob(job.ID, job.PMMAgentID, 0, name, dbConfig, locationConfig, params.Folder) + err = s.jobsService.StartMySQLBackupJob(job.ID, job.PMMAgentID, 0, name, dbConfig, locationConfig, params.Folder, params.Compression) case models.MongoDBServiceType: err = s.jobsService.StartMongoDBBackupJob(svc, job.ID, job.PMMAgentID, 0, name, - job.Data.MongoDBBackup.Mode, job.Data.MongoDBBackup.DataModel, locationConfig, params.Folder) + job.Data.MongoDBBackup.Mode, job.Data.MongoDBBackup.DataModel, locationConfig, params.Folder, params.Compression) case models.PostgreSQLServiceType, models.ProxySQLServiceType, models.HAProxyServiceType, @@ -254,6 +256,7 @@ type restoreJobParams struct { DataModel models.DataModel PITRTimestamp time.Time Folder string + Compression models.BackupCompression } // RestoreBackup starts restore backup job. 
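
With `Compression` now part of `PerformBackupParams` and persisted through `CreateArtifactParams`, callers opt in per backup and the value travels from the artifact record down to the agent job. A hedged usage sketch of a caller after this change; `ctx`, `backupService`, `serviceID`, and `locationID` are assumed to exist in the surrounding code, and the backup name and folder are placeholders:

```go
// Illustrative only: how a caller of the extended API sets the new field.
artifactID, err := backupService.PerformBackup(ctx, backup.PerformBackupParams{
	ServiceID:   serviceID,
	LocationID:  locationID,
	Name:        "nightly-snapshot",
	DataModel:   models.LogicalDataModel,
	Mode:        models.Snapshot,
	Folder:      "nightly",
	Compression: models.ZSTD, // new field: recorded on the artifact, then forwarded to StartMySQLBackupJob/StartMongoDBBackupJob
})
if err != nil {
	return "", err
}
```

Storing the compression on the artifact (rather than only on the job) is what lets the restore path below pass `artifact.Compression` back to the agent without re-asking the user.
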
@@ -369,6 +372,7 @@ func (s *Service) RestoreBackup(ctx context.Context, serviceID, artifactID strin DataModel: artifact.DataModel, PITRTimestamp: pitrTimestamp, Folder: artifactFolder, + Compression: artifact.Compression, } if len(artifact.MetadataList) != 0 && @@ -449,7 +453,8 @@ func (s *Service) startRestoreJob(params *restoreJobParams) error { 0, params.ArtifactName, locationConfig, - params.Folder) + params.Folder, + params.Compression) case models.MongoDBServiceType: return s.jobsService.StartMongoDBRestoreBackupJob( params.Service, @@ -461,7 +466,8 @@ func (s *Service) startRestoreJob(params *restoreJobParams) error { params.DataModel, locationConfig, params.PITRTimestamp, - params.Folder) + params.Folder, + params.Compression) case models.PostgreSQLServiceType, models.ProxySQLServiceType, models.HAProxyServiceType, diff --git a/managed/services/backup/backup_service_test.go b/managed/services/backup/backup_service_test.go index 9ee71de7760..9554ce99b54 100644 --- a/managed/services/backup/backup_service_test.go +++ b/managed/services/backup/backup_service_test.go @@ -162,16 +162,17 @@ func TestPerformBackup(t *testing.T) { S3Config: tc.locationModel.S3Config, } mockedJobsService.On("StartMySQLBackupJob", mock.Anything, pointer.GetString(agent.PMMAgentID), time.Duration(0), - mock.Anything, mock.Anything, locationConfig, "artifact_folder").Return(nil).Once() + mock.Anything, mock.Anything, locationConfig, "artifact_folder", models.Default).Return(nil).Once() } artifactID, err := backupService.PerformBackup(ctx, PerformBackupParams{ - ServiceID: pointer.GetString(agent.ServiceID), - LocationID: tc.locationModel.ID, - Name: tc.name + "_" + "test_backup", - DataModel: tc.dataModel, - Mode: models.Snapshot, - Folder: "artifact_folder", + ServiceID: pointer.GetString(agent.ServiceID), + LocationID: tc.locationModel.ID, + Name: tc.name + "_" + "test_backup", + DataModel: tc.dataModel, + Mode: models.Snapshot, + Folder: "artifact_folder", + Compression: models.Default, }) if tc.expectedError != nil { @@ -197,12 +198,13 @@ func TestPerformBackup(t *testing.T) { mockedCompatibilityService.On("CheckSoftwareCompatibilityForService", ctx, pointer.GetString(agent.ServiceID)). 
Return("", nil).Once() artifactID, err := backupService.PerformBackup(ctx, PerformBackupParams{ - ServiceID: pointer.GetString(agent.ServiceID), - LocationID: s3Location.ID, - Name: "test_backup", - DataModel: models.PhysicalDataModel, - Mode: models.PITR, - Folder: "artifact_folder_2", + ServiceID: pointer.GetString(agent.ServiceID), + LocationID: s3Location.ID, + Name: "test_backup", + DataModel: models.PhysicalDataModel, + Mode: models.PITR, + Folder: "artifact_folder_2", + Compression: models.Default, }) assert.ErrorIs(t, err, ErrIncompatibleDataModel) assert.Empty(t, artifactID) @@ -211,12 +213,13 @@ func TestPerformBackup(t *testing.T) { t.Run("backup fails for empty service ID", func(t *testing.T) { mockedCompatibilityService.On("CheckSoftwareCompatibilityForService", ctx, "").Return("", nil).Once() artifactID, err := backupService.PerformBackup(ctx, PerformBackupParams{ - ServiceID: "", - LocationID: s3Location.ID, - Name: "test_backup", - DataModel: models.PhysicalDataModel, - Mode: models.PITR, - Folder: "artifact_folder_3", + ServiceID: "", + LocationID: s3Location.ID, + Name: "test_backup", + DataModel: models.PhysicalDataModel, + Mode: models.PITR, + Folder: "artifact_folder_3", + Compression: models.Default, }) assert.ErrorContains(t, err, "Empty Service ID") assert.Empty(t, artifactID) @@ -226,12 +229,13 @@ func TestPerformBackup(t *testing.T) { mockedCompatibilityService.On("CheckSoftwareCompatibilityForService", ctx, pointer.GetString(agent.ServiceID)). Return("", nil).Once() artifactID, err := backupService.PerformBackup(ctx, PerformBackupParams{ - ServiceID: pointer.GetString(agent.ServiceID), - LocationID: s3Location.ID, - Name: "test_backup", - DataModel: models.PhysicalDataModel, - Mode: models.Incremental, - Folder: "artifact_folder_4", + ServiceID: pointer.GetString(agent.ServiceID), + LocationID: s3Location.ID, + Name: "test_backup", + DataModel: models.PhysicalDataModel, + Mode: models.Incremental, + Folder: "artifact_folder_4", + Compression: models.Default, }) assert.ErrorContains(t, err, "the only supported backups mode for mongoDB is snapshot and PITR") assert.Empty(t, artifactID) @@ -286,15 +290,16 @@ func TestRestoreBackup(t *testing.T) { t.Run("mysql", func(t *testing.T) { agent, _ := setup(t, db.Querier, models.MySQLServiceType, "test-mysql-restore-service") artifact, err := models.CreateArtifact(db.Querier, models.CreateArtifactParams{ - Name: "mysql-artifact-name", - Vendor: string(models.MySQLServiceType), - DBVersion: "8.0.25", - LocationID: s3Location.ID, - ServiceID: *agent.ServiceID, - DataModel: models.PhysicalDataModel, - Mode: models.Snapshot, - Status: models.SuccessBackupStatus, - Folder: artifactFolder, + Name: "mysql-artifact-name", + Vendor: string(models.MySQLServiceType), + DBVersion: "8.0.25", + LocationID: s3Location.ID, + ServiceID: *agent.ServiceID, + DataModel: models.PhysicalDataModel, + Mode: models.Snapshot, + Status: models.SuccessBackupStatus, + Folder: artifactFolder, + Compression: models.Default, }) require.NoError(t, err) @@ -326,7 +331,7 @@ func TestRestoreBackup(t *testing.T) { if tc.expectedError == nil { mockedJobsService.On("StartMySQLRestoreBackupJob", mock.Anything, pointer.GetString(agent.PMMAgentID), - pointer.GetString(agent.ServiceID), mock.Anything, artifact.Name, mock.Anything, artifactFolder).Return(nil).Once() + pointer.GetString(agent.ServiceID), mock.Anything, artifact.Name, mock.Anything, artifactFolder, artifact.Compression).Return(nil).Once() } restoreID, err := backupService.RestoreBackup(ctx, 
pointer.GetString(agent.ServiceID), artifact.ID, time.Unix(0, 0)) if tc.expectedError != nil { @@ -355,15 +360,16 @@ func TestRestoreBackup(t *testing.T) { t.Run("mongo", func(t *testing.T) { agent, service := setup(t, db.Querier, models.MongoDBServiceType, "test-mongo-restore-service") artifactWithVersion, err := models.CreateArtifact(db.Querier, models.CreateArtifactParams{ - Name: "mongodb-artifact-name-version", - Vendor: string(models.MongoDBSoftwareName), - DBVersion: "6.0.2-1", - LocationID: s3Location.ID, - ServiceID: *agent.ServiceID, - DataModel: models.LogicalDataModel, - Mode: models.Snapshot, - Status: models.SuccessBackupStatus, - Folder: artifactFolder, + Name: "mongodb-artifact-name-version", + Vendor: string(models.MongoDBSoftwareName), + DBVersion: "6.0.2-1", + LocationID: s3Location.ID, + ServiceID: *agent.ServiceID, + DataModel: models.LogicalDataModel, + Mode: models.Snapshot, + Status: models.SuccessBackupStatus, + Folder: artifactFolder, + Compression: models.Default, }) require.NoError(t, err) @@ -373,13 +379,14 @@ func TestRestoreBackup(t *testing.T) { require.NoError(t, err) artifactNoVersion, err := models.CreateArtifact(db.Querier, models.CreateArtifactParams{ - Name: "mongodb-artifact-name-no-version", - Vendor: string(models.MongoDBSoftwareName), - LocationID: s3Location.ID, - ServiceID: *agent.ServiceID, - DataModel: models.LogicalDataModel, - Mode: models.Snapshot, - Status: models.SuccessBackupStatus, + Name: "mongodb-artifact-name-no-version", + Vendor: string(models.MongoDBSoftwareName), + LocationID: s3Location.ID, + ServiceID: *agent.ServiceID, + DataModel: models.LogicalDataModel, + Mode: models.Snapshot, + Status: models.SuccessBackupStatus, + Compression: models.Default, }) require.NoError(t, err) @@ -423,11 +430,11 @@ func TestRestoreBackup(t *testing.T) { if len(tc.artifact.MetadataList) != 0 && tc.artifact.MetadataList[0].BackupToolData != nil { mockedJobsService.On("StartMongoDBRestoreBackupJob", service, mock.Anything, pointer.GetString(agent.PMMAgentID), time.Duration(0), tc.artifact.Name, tc.artifact.MetadataList[0].BackupToolData.PbmMetadata.Name, tc.artifact.DataModel, - mock.Anything, time.Unix(0, 0), tc.artifact.Folder).Return(nil).Once() + mock.Anything, time.Unix(0, 0), tc.artifact.Folder, tc.artifact.Compression).Return(nil).Once() } else { mockedJobsService.On("StartMongoDBRestoreBackupJob", service, mock.Anything, pointer.GetString(agent.PMMAgentID), time.Duration(0), tc.artifact.Name, "", tc.artifact.DataModel, - mock.Anything, time.Unix(0, 0), tc.artifact.Folder).Return(nil).Once() + mock.Anything, time.Unix(0, 0), tc.artifact.Folder, tc.artifact.Compression).Return(nil).Once() } } restoreID, err := backupService.RestoreBackup(ctx, pointer.GetString(agent.ServiceID), tc.artifact.ID, time.Unix(0, 0)) @@ -443,13 +450,14 @@ func TestRestoreBackup(t *testing.T) { t.Run("artifact not ready", func(t *testing.T) { artifact, err := models.CreateArtifact(db.Querier, models.CreateArtifactParams{ - Name: "mongo-artifact-name-s3", - Vendor: string(models.MongoDBServiceType), - LocationID: s3Location.ID, - ServiceID: *agent.ServiceID, - DataModel: models.LogicalDataModel, - Mode: models.Snapshot, - Status: models.PendingBackupStatus, + Name: "mongo-artifact-name-s3", + Vendor: string(models.MongoDBServiceType), + LocationID: s3Location.ID, + ServiceID: *agent.ServiceID, + DataModel: models.LogicalDataModel, + Mode: models.Snapshot, + Status: models.PendingBackupStatus, + Compression: models.Default, }) require.NoError(t, err) @@ -460,13 +468,14 
@@ func TestRestoreBackup(t *testing.T) { t.Run("PITR not supported for local storages", func(t *testing.T) { artifact, err := models.CreateArtifact(db.Querier, models.CreateArtifactParams{ - Name: "mongo-artifact-name-local", - Vendor: string(models.MongoDBServiceType), - LocationID: filesystemLocation.ID, - ServiceID: *agent.ServiceID, - DataModel: models.LogicalDataModel, - Mode: models.PITR, - Status: models.SuccessBackupStatus, + Name: "mongo-artifact-name-local", + Vendor: string(models.MongoDBServiceType), + LocationID: filesystemLocation.ID, + ServiceID: *agent.ServiceID, + DataModel: models.LogicalDataModel, + Mode: models.PITR, + Status: models.SuccessBackupStatus, + Compression: models.Default, }) require.NoError(t, err) @@ -519,14 +528,15 @@ func TestCheckArtifactModePreconditions(t *testing.T) { name: "success", pitrValue: time.Unix(0, 0), artifactParams: models.CreateArtifactParams{ - Name: "mysql-artifact-name-1", - Vendor: string(models.MySQLServiceType), - DBVersion: "8.0.25", - LocationID: locationRes.ID, - ServiceID: *agent.ServiceID, - DataModel: models.PhysicalDataModel, - Mode: models.Snapshot, - Status: models.SuccessBackupStatus, + Name: "mysql-artifact-name-1", + Vendor: string(models.MySQLServiceType), + DBVersion: "8.0.25", + LocationID: locationRes.ID, + ServiceID: *agent.ServiceID, + DataModel: models.PhysicalDataModel, + Mode: models.Snapshot, + Status: models.SuccessBackupStatus, + Compression: models.Default, }, err: nil, }, @@ -534,14 +544,15 @@ func TestCheckArtifactModePreconditions(t *testing.T) { name: "PITR not supported for MySQL", pitrValue: time.Unix(0, 0), artifactParams: models.CreateArtifactParams{ - Name: "mysql-artifact-name-2", - Vendor: string(models.MySQLServiceType), - DBVersion: "8.0.25", - LocationID: locationRes.ID, - ServiceID: *agent.ServiceID, - DataModel: models.PhysicalDataModel, - Mode: models.PITR, - Status: models.SuccessBackupStatus, + Name: "mysql-artifact-name-2", + Vendor: string(models.MySQLServiceType), + DBVersion: "8.0.25", + LocationID: locationRes.ID, + ServiceID: *agent.ServiceID, + DataModel: models.PhysicalDataModel, + Mode: models.PITR, + Status: models.SuccessBackupStatus, + Compression: models.Default, }, err: ErrIncompatibleService, }, @@ -549,14 +560,15 @@ func TestCheckArtifactModePreconditions(t *testing.T) { name: "snapshot artifact is not compatible with non-empty pitr date", pitrValue: time.Unix(1, 0), artifactParams: models.CreateArtifactParams{ - Name: "mysql-artifact-name-3", - Vendor: string(models.MySQLServiceType), - DBVersion: "8.0.25", - LocationID: locationRes.ID, - ServiceID: *agent.ServiceID, - DataModel: models.PhysicalDataModel, - Mode: models.Snapshot, - Status: models.SuccessBackupStatus, + Name: "mysql-artifact-name-3", + Vendor: string(models.MySQLServiceType), + DBVersion: "8.0.25", + LocationID: locationRes.ID, + ServiceID: *agent.ServiceID, + DataModel: models.PhysicalDataModel, + Mode: models.Snapshot, + Status: models.SuccessBackupStatus, + Compression: models.Default, }, err: ErrIncompatibleArtifactMode, }, @@ -600,13 +612,14 @@ func TestCheckArtifactModePreconditions(t *testing.T) { name: "success logical restore", pitrValue: time.Unix(0, 0), artifactParams: models.CreateArtifactParams{ - Name: "mongo-artifact-name-1", - Vendor: string(models.MongoDBServiceType), - LocationID: locationRes.ID, - ServiceID: *agent.ServiceID, - DataModel: models.LogicalDataModel, - Mode: models.Snapshot, - Status: models.SuccessBackupStatus, + Name: "mongo-artifact-name-1", + Vendor: 
string(models.MongoDBServiceType), + LocationID: locationRes.ID, + ServiceID: *agent.ServiceID, + DataModel: models.LogicalDataModel, + Mode: models.Snapshot, + Status: models.SuccessBackupStatus, + Compression: models.Default, }, err: nil, }, @@ -614,13 +627,14 @@ func TestCheckArtifactModePreconditions(t *testing.T) { name: "physical restore is supported", pitrValue: time.Unix(0, 0), artifactParams: models.CreateArtifactParams{ - Name: "mongo-artifact-name-2", - Vendor: string(models.MongoDBServiceType), - LocationID: locationRes.ID, - ServiceID: *agent.ServiceID, - DataModel: models.PhysicalDataModel, - Mode: models.Snapshot, - Status: models.SuccessBackupStatus, + Name: "mongo-artifact-name-2", + Vendor: string(models.MongoDBServiceType), + LocationID: locationRes.ID, + ServiceID: *agent.ServiceID, + DataModel: models.PhysicalDataModel, + Mode: models.Snapshot, + Status: models.SuccessBackupStatus, + Compression: models.Default, }, err: nil, }, @@ -628,13 +642,14 @@ func TestCheckArtifactModePreconditions(t *testing.T) { name: "snapshot artifact is not compatible with non-empty pitr date", pitrValue: time.Unix(1, 0), artifactParams: models.CreateArtifactParams{ - Name: "mongo-artifact-name-3", - Vendor: string(models.MongoDBServiceType), - LocationID: locationRes.ID, - ServiceID: *agent.ServiceID, - DataModel: models.LogicalDataModel, - Mode: models.Snapshot, - Status: models.SuccessBackupStatus, + Name: "mongo-artifact-name-3", + Vendor: string(models.MongoDBServiceType), + LocationID: locationRes.ID, + ServiceID: *agent.ServiceID, + DataModel: models.LogicalDataModel, + Mode: models.Snapshot, + Status: models.SuccessBackupStatus, + Compression: models.Default, }, err: ErrIncompatibleArtifactMode, }, @@ -642,13 +657,14 @@ func TestCheckArtifactModePreconditions(t *testing.T) { name: "timestamp not provided for pitr artifact", pitrValue: time.Unix(0, 0), artifactParams: models.CreateArtifactParams{ - Name: "mongo-artifact-name-4", - Vendor: string(models.MongoDBServiceType), - LocationID: locationRes.ID, - ServiceID: *agent.ServiceID, - DataModel: models.LogicalDataModel, - Mode: models.PITR, - Status: models.SuccessBackupStatus, + Name: "mongo-artifact-name-4", + Vendor: string(models.MongoDBServiceType), + LocationID: locationRes.ID, + ServiceID: *agent.ServiceID, + DataModel: models.LogicalDataModel, + Mode: models.PITR, + Status: models.SuccessBackupStatus, + Compression: models.Default, }, err: ErrIncompatibleArtifactMode, }, @@ -657,13 +673,14 @@ func TestCheckArtifactModePreconditions(t *testing.T) { pitrValue: time.Unix(int64(rangeStart2)-1, 0), prepareMock: true, artifactParams: models.CreateArtifactParams{ - Name: "mongo-artifact-name-5", - Vendor: string(models.MongoDBServiceType), - LocationID: locationRes.ID, - ServiceID: *agent.ServiceID, - DataModel: models.LogicalDataModel, - Mode: models.PITR, - Status: models.SuccessBackupStatus, + Name: "mongo-artifact-name-5", + Vendor: string(models.MongoDBServiceType), + LocationID: locationRes.ID, + ServiceID: *agent.ServiceID, + DataModel: models.LogicalDataModel, + Mode: models.PITR, + Status: models.SuccessBackupStatus, + Compression: models.Default, }, err: ErrTimestampOutOfRange, }, @@ -672,13 +689,14 @@ func TestCheckArtifactModePreconditions(t *testing.T) { pitrValue: time.Unix(int64(rangeStart2)+1, 0), prepareMock: true, artifactParams: models.CreateArtifactParams{ - Name: "mongo-artifact-name-6", - Vendor: string(models.MongoDBServiceType), - LocationID: locationRes.ID, - ServiceID: *agent.ServiceID, - DataModel: 
models.LogicalDataModel, - Mode: models.PITR, - Status: models.SuccessBackupStatus, + Name: "mongo-artifact-name-6", + Vendor: string(models.MongoDBServiceType), + LocationID: locationRes.ID, + ServiceID: *agent.ServiceID, + DataModel: models.LogicalDataModel, + Mode: models.PITR, + Status: models.SuccessBackupStatus, + Compression: models.Default, }, err: nil, }, @@ -694,6 +712,7 @@ func TestCheckArtifactModePreconditions(t *testing.T) { Mode: models.Snapshot, Status: models.SuccessBackupStatus, IsShardedCluster: true, + Compression: models.Default, }, err: ErrIncompatibleService, }, diff --git a/managed/services/backup/compatibility_helpers.go b/managed/services/backup/compatibility_helpers.go index 94f0f188826..66137314976 100644 --- a/managed/services/backup/compatibility_helpers.go +++ b/managed/services/backup/compatibility_helpers.go @@ -169,7 +169,6 @@ func mySQLBackupSoftwareInstalledAndCompatible(svm map[models.SoftwareName]strin models.MysqldSoftwareName, models.XtrabackupSoftwareName, models.XbcloudSoftwareName, - models.QpressSoftwareName, } { if svm[name] == "" { if name == models.XtrabackupSoftwareName || name == models.XbcloudSoftwareName { diff --git a/managed/services/backup/compatibility_service_test.go b/managed/services/backup/compatibility_service_test.go index 151c66744a9..ee693d1bbb6 100644 --- a/managed/services/backup/compatibility_service_test.go +++ b/managed/services/backup/compatibility_service_test.go @@ -40,7 +40,6 @@ func TestCheckCompatibility(t *testing.T) { &agents.Mysqld{}, &agents.Xtrabackup{}, &agents.Xbcloud{}, - &agents.Qpress{}, } require.Equal(t, expectedMysqlSoftware, mysqlSoftware) @@ -65,7 +64,6 @@ func TestCheckCompatibility(t *testing.T) { {Version: "8.0.25"}, {Version: ""}, {Version: ""}, - {Version: "1.1"}, }, expectedError: ErrXtrabackupNotInstalled, }, @@ -76,7 +74,6 @@ func TestCheckCompatibility(t *testing.T) { {Version: "8.0.25"}, {Version: "8.0.24"}, {Version: "8.0.25"}, - {Version: "1.1"}, }, expectedError: ErrInvalidXtrabackup, }, @@ -87,21 +84,9 @@ func TestCheckCompatibility(t *testing.T) { {Version: "8.0.25"}, {Version: "8.0.24"}, {Version: "8.0.24"}, - {Version: "1.1"}, }, expectedError: ErrIncompatibleXtrabackup, }, - { - name: "qpress no installed", - serviceType: models.MySQLServiceType, - versions: []agents.Version{ - {Version: "8.0.25"}, - {Version: "8.0.25"}, - {Version: "8.0.25"}, - {Version: ""}, - }, - expectedError: ErrIncompatibleService, - }, { name: "mysql no installed", serviceType: models.MySQLServiceType, @@ -109,7 +94,6 @@ func TestCheckCompatibility(t *testing.T) { {Version: ""}, {Version: "8.0.25"}, {Version: "8.0.25"}, - {Version: "1.1"}, }, expectedError: ErrIncompatibleService, }, @@ -120,7 +104,6 @@ func TestCheckCompatibility(t *testing.T) { {Version: "8.0.25"}, {Version: "8.0.25", Error: "Some error"}, {Version: "8.0.25"}, - {Version: "1.1"}, }, expectedError: ErrComparisonImpossible, }, @@ -139,7 +122,6 @@ func TestCheckCompatibility(t *testing.T) { {Version: "8.0.25"}, {Version: "8.0.25"}, {Version: "8.0.25"}, - {Version: "1.1"}, }, expectedError: nil, }, @@ -222,7 +204,6 @@ func TestFindCompatibleServiceIDs(t *testing.T) { {Name: models.MysqldSoftwareName, Version: ""}, {Name: models.XtrabackupSoftwareName, Version: "8.0.25"}, {Name: models.XbcloudSoftwareName, Version: "8.0.25"}, - {Name: models.QpressSoftwareName, Version: "1.1"}, }, }, { @@ -231,7 +212,6 @@ func TestFindCompatibleServiceIDs(t *testing.T) { {Name: models.MysqldSoftwareName, Version: "8.0.25"}, {Name: models.XtrabackupSoftwareName, 
Version: "8.0.24"}, {Name: models.XbcloudSoftwareName, Version: "8.0.25"}, - {Name: models.QpressSoftwareName, Version: "1.1"}, }, }, { @@ -240,16 +220,6 @@ func TestFindCompatibleServiceIDs(t *testing.T) { {Name: models.MysqldSoftwareName, Version: "8.0.25"}, {Name: models.XtrabackupSoftwareName, Version: "8.0.25"}, {Name: models.XbcloudSoftwareName, Version: "8.0.24"}, - {Name: models.QpressSoftwareName, Version: "1.1"}, - }, - }, - { - ServiceID: "4", - SoftwareVersions: models.SoftwareVersions{ - {Name: models.MysqldSoftwareName, Version: "8.0.25"}, - {Name: models.XtrabackupSoftwareName, Version: "8.0.25"}, - {Name: models.XbcloudSoftwareName, Version: "8.0.25"}, - {Name: models.QpressSoftwareName, Version: ""}, }, }, { @@ -258,7 +228,6 @@ func TestFindCompatibleServiceIDs(t *testing.T) { {Name: models.MysqldSoftwareName, Version: "8.0.25"}, {Name: models.XtrabackupSoftwareName, Version: "8.0.25"}, {Name: models.XbcloudSoftwareName, Version: "8.0.25"}, - {Name: models.QpressSoftwareName, Version: "1.1"}, }, }, { @@ -267,7 +236,6 @@ func TestFindCompatibleServiceIDs(t *testing.T) { {Name: models.MysqldSoftwareName, Version: "8.0.25"}, {Name: models.XtrabackupSoftwareName, Version: ""}, {Name: models.XbcloudSoftwareName, Version: "8.0.25"}, - {Name: models.QpressSoftwareName, Version: "1.1"}, }, }, { @@ -276,7 +244,6 @@ func TestFindCompatibleServiceIDs(t *testing.T) { {Name: models.MysqldSoftwareName, Version: "8.0.24"}, {Name: models.XtrabackupSoftwareName, Version: "8.0.25"}, {Name: models.XbcloudSoftwareName, Version: "8.0.25"}, - {Name: models.QpressSoftwareName, Version: "1.1"}, }, }, { @@ -285,7 +252,6 @@ func TestFindCompatibleServiceIDs(t *testing.T) { {Name: models.MysqldSoftwareName, Version: "8.0.25"}, {Name: models.XtrabackupSoftwareName, Version: "8.0.26"}, {Name: models.XbcloudSoftwareName, Version: "8.0.26"}, - {Name: models.QpressSoftwareName, Version: "1.1"}, }, }, } @@ -519,7 +485,6 @@ func TestFindArtifactCompatibleServices(t *testing.T) { {Name: "mysqld", Version: "8.0.25"}, {Name: "xtrabackup", Version: "8.0.25"}, {Name: "xbcloud", Version: "8.0.25"}, - {Name: "qpress", Version: "1.1"}, }, } @@ -543,7 +508,6 @@ func TestFindArtifactCompatibleServices(t *testing.T) { {Name: "mysqld", Version: "8.0.25"}, {Name: "xtrabackup", Version: "8.0.24"}, {Name: "xbcloud", Version: "8.0.24"}, - {Name: "qpress", Version: "1.1"}, }, }, } @@ -561,7 +525,6 @@ func TestFindArtifactCompatibleServices(t *testing.T) { SoftwareVersions: models.SoftwareVersions{ {Name: "mysqld", Version: "8.0.25"}, {Name: "xtrabackup", Version: "8.0.25"}, - {Name: "qpress", Version: "1.1"}, }, }, } @@ -580,7 +543,6 @@ func TestFindArtifactCompatibleServices(t *testing.T) { {Name: "mysqld", Version: "8.0.25"}, {Name: "xtrabackup", Version: "8.0.26"}, {Name: "xbcloud", Version: "8.0.26"}, - {Name: "qpress", Version: "1.1"}, }, }, } @@ -599,7 +561,6 @@ func TestFindArtifactCompatibleServices(t *testing.T) { {Name: "mysqld", Version: "8.0.25"}, {Name: "xtrabackup", Version: "8.0.25"}, {Name: "xbcloud", Version: "8.0.25"}, - {Name: "qpress", Version: "1.1"}, }, }, } diff --git a/managed/services/backup/deps.go b/managed/services/backup/deps.go index 914441a48c3..189f3fb5c4d 100644 --- a/managed/services/backup/deps.go +++ b/managed/services/backup/deps.go @@ -36,6 +36,7 @@ type jobsService interface { dbConfig *models.DBConfig, locationConfig *models.BackupLocationConfig, folder string, + compression models.BackupCompression, ) error StartMySQLRestoreBackupJob( jobID string, @@ -45,6 +46,7 @@ type jobsService 
interface { name string, locationConfig *models.BackupLocationConfig, folder string, + compression models.BackupCompression, ) error StartMongoDBBackupJob( service *models.Service, @@ -56,6 +58,7 @@ type jobsService interface { dataModel models.DataModel, locationConfig *models.BackupLocationConfig, folder string, + compression models.BackupCompression, ) error StartMongoDBRestoreBackupJob( service *models.Service, @@ -68,6 +71,7 @@ type jobsService interface { locationConfig *models.BackupLocationConfig, pitrTimestamp time.Time, folder string, + compression models.BackupCompression, ) error } diff --git a/managed/services/backup/mock_jobs_service_test.go b/managed/services/backup/mock_jobs_service_test.go index 8d75301db53..7c8495fc880 100644 --- a/managed/services/backup/mock_jobs_service_test.go +++ b/managed/services/backup/mock_jobs_service_test.go @@ -15,17 +15,17 @@ type mockJobsService struct { mock.Mock } -// StartMongoDBBackupJob provides a mock function with given fields: service, jobID, pmmAgentID, timeout, name, mode, dataModel, locationConfig, folder -func (_m *mockJobsService) StartMongoDBBackupJob(service *models.Service, jobID string, pmmAgentID string, timeout time.Duration, name string, mode models.BackupMode, dataModel models.DataModel, locationConfig *models.BackupLocationConfig, folder string) error { - ret := _m.Called(service, jobID, pmmAgentID, timeout, name, mode, dataModel, locationConfig, folder) +// StartMongoDBBackupJob provides a mock function with given fields: service, jobID, pmmAgentID, timeout, name, mode, dataModel, locationConfig, folder, compression +func (_m *mockJobsService) StartMongoDBBackupJob(service *models.Service, jobID string, pmmAgentID string, timeout time.Duration, name string, mode models.BackupMode, dataModel models.DataModel, locationConfig *models.BackupLocationConfig, folder string, compression models.BackupCompression) error { + ret := _m.Called(service, jobID, pmmAgentID, timeout, name, mode, dataModel, locationConfig, folder, compression) if len(ret) == 0 { panic("no return value specified for StartMongoDBBackupJob") } var r0 error - if rf, ok := ret.Get(0).(func(*models.Service, string, string, time.Duration, string, models.BackupMode, models.DataModel, *models.BackupLocationConfig, string) error); ok { - r0 = rf(service, jobID, pmmAgentID, timeout, name, mode, dataModel, locationConfig, folder) + if rf, ok := ret.Get(0).(func(*models.Service, string, string, time.Duration, string, models.BackupMode, models.DataModel, *models.BackupLocationConfig, string, models.BackupCompression) error); ok { + r0 = rf(service, jobID, pmmAgentID, timeout, name, mode, dataModel, locationConfig, folder, compression) } else { r0 = ret.Error(0) } @@ -33,17 +33,17 @@ func (_m *mockJobsService) StartMongoDBBackupJob(service *models.Service, jobID return r0 } -// StartMongoDBRestoreBackupJob provides a mock function with given fields: service, jobID, pmmAgentID, timeout, name, pbmBackupName, dataModel, locationConfig, pitrTimestamp, folder -func (_m *mockJobsService) StartMongoDBRestoreBackupJob(service *models.Service, jobID string, pmmAgentID string, timeout time.Duration, name string, pbmBackupName string, dataModel models.DataModel, locationConfig *models.BackupLocationConfig, pitrTimestamp time.Time, folder string) error { - ret := _m.Called(service, jobID, pmmAgentID, timeout, name, pbmBackupName, dataModel, locationConfig, pitrTimestamp, folder) +// StartMongoDBRestoreBackupJob provides a mock function with given fields: service, jobID, 
pmmAgentID, timeout, name, pbmBackupName, dataModel, locationConfig, pitrTimestamp, folder, compression +func (_m *mockJobsService) StartMongoDBRestoreBackupJob(service *models.Service, jobID string, pmmAgentID string, timeout time.Duration, name string, pbmBackupName string, dataModel models.DataModel, locationConfig *models.BackupLocationConfig, pitrTimestamp time.Time, folder string, compression models.BackupCompression) error { + ret := _m.Called(service, jobID, pmmAgentID, timeout, name, pbmBackupName, dataModel, locationConfig, pitrTimestamp, folder, compression) if len(ret) == 0 { panic("no return value specified for StartMongoDBRestoreBackupJob") } var r0 error - if rf, ok := ret.Get(0).(func(*models.Service, string, string, time.Duration, string, string, models.DataModel, *models.BackupLocationConfig, time.Time, string) error); ok { - r0 = rf(service, jobID, pmmAgentID, timeout, name, pbmBackupName, dataModel, locationConfig, pitrTimestamp, folder) + if rf, ok := ret.Get(0).(func(*models.Service, string, string, time.Duration, string, string, models.DataModel, *models.BackupLocationConfig, time.Time, string, models.BackupCompression) error); ok { + r0 = rf(service, jobID, pmmAgentID, timeout, name, pbmBackupName, dataModel, locationConfig, pitrTimestamp, folder, compression) } else { r0 = ret.Error(0) } @@ -51,17 +51,17 @@ func (_m *mockJobsService) StartMongoDBRestoreBackupJob(service *models.Service, return r0 } -// StartMySQLBackupJob provides a mock function with given fields: jobID, pmmAgentID, timeout, name, dbConfig, locationConfig, folder -func (_m *mockJobsService) StartMySQLBackupJob(jobID string, pmmAgentID string, timeout time.Duration, name string, dbConfig *models.DBConfig, locationConfig *models.BackupLocationConfig, folder string) error { - ret := _m.Called(jobID, pmmAgentID, timeout, name, dbConfig, locationConfig, folder) +// StartMySQLBackupJob provides a mock function with given fields: jobID, pmmAgentID, timeout, name, dbConfig, locationConfig, folder, compression +func (_m *mockJobsService) StartMySQLBackupJob(jobID string, pmmAgentID string, timeout time.Duration, name string, dbConfig *models.DBConfig, locationConfig *models.BackupLocationConfig, folder string, compression models.BackupCompression) error { + ret := _m.Called(jobID, pmmAgentID, timeout, name, dbConfig, locationConfig, folder, compression) if len(ret) == 0 { panic("no return value specified for StartMySQLBackupJob") } var r0 error - if rf, ok := ret.Get(0).(func(string, string, time.Duration, string, *models.DBConfig, *models.BackupLocationConfig, string) error); ok { - r0 = rf(jobID, pmmAgentID, timeout, name, dbConfig, locationConfig, folder) + if rf, ok := ret.Get(0).(func(string, string, time.Duration, string, *models.DBConfig, *models.BackupLocationConfig, string, models.BackupCompression) error); ok { + r0 = rf(jobID, pmmAgentID, timeout, name, dbConfig, locationConfig, folder, compression) } else { r0 = ret.Error(0) } @@ -69,17 +69,17 @@ func (_m *mockJobsService) StartMySQLBackupJob(jobID string, pmmAgentID string, return r0 } -// StartMySQLRestoreBackupJob provides a mock function with given fields: jobID, pmmAgentID, serviceID, timeout, name, locationConfig, folder -func (_m *mockJobsService) StartMySQLRestoreBackupJob(jobID string, pmmAgentID string, serviceID string, timeout time.Duration, name string, locationConfig *models.BackupLocationConfig, folder string) error { - ret := _m.Called(jobID, pmmAgentID, serviceID, timeout, name, locationConfig, folder) +// 
StartMySQLRestoreBackupJob provides a mock function with given fields: jobID, pmmAgentID, serviceID, timeout, name, locationConfig, folder, compression +func (_m *mockJobsService) StartMySQLRestoreBackupJob(jobID string, pmmAgentID string, serviceID string, timeout time.Duration, name string, locationConfig *models.BackupLocationConfig, folder string, compression models.BackupCompression) error { + ret := _m.Called(jobID, pmmAgentID, serviceID, timeout, name, locationConfig, folder, compression) if len(ret) == 0 { panic("no return value specified for StartMySQLRestoreBackupJob") } var r0 error - if rf, ok := ret.Get(0).(func(string, string, string, time.Duration, string, *models.BackupLocationConfig, string) error); ok { - r0 = rf(jobID, pmmAgentID, serviceID, timeout, name, locationConfig, folder) + if rf, ok := ret.Get(0).(func(string, string, string, time.Duration, string, *models.BackupLocationConfig, string, models.BackupCompression) error); ok { + r0 = rf(jobID, pmmAgentID, serviceID, timeout, name, locationConfig, folder, compression) } else { r0 = ret.Error(0) } diff --git a/managed/services/backup/removal_service_test.go b/managed/services/backup/removal_service_test.go index 3cf2bd3ce58..34f10298a96 100644 --- a/managed/services/backup/removal_service_test.go +++ b/managed/services/backup/removal_service_test.go @@ -71,13 +71,14 @@ func TestDeleteArtifact(t *testing.T) { createArtifact := func(status models.BackupStatus) *models.Artifact { artifact, err := models.CreateArtifact(db.Querier, models.CreateArtifactParams{ - Name: "artifact_name", - Vendor: "MySQL", - LocationID: locationRes.ID, - ServiceID: *agent.ServiceID, - DataModel: models.PhysicalDataModel, - Mode: models.Snapshot, - Status: status, + Name: "artifact_name", + Vendor: "MySQL", + LocationID: locationRes.ID, + ServiceID: *agent.ServiceID, + DataModel: models.PhysicalDataModel, + Mode: models.Snapshot, + Status: status, + Compression: models.Default, }) require.NoError(t, err) return artifact @@ -182,14 +183,15 @@ func TestDeleteArtifact(t *testing.T) { agent, _ := setup(t, db.Querier, models.MongoDBServiceType, "test-service2") artifact, err := models.CreateArtifact(db.Querier, models.CreateArtifactParams{ - Name: "artifact_name", - Vendor: "mongodb", - LocationID: locationRes.ID, - ServiceID: *agent.ServiceID, - DataModel: models.LogicalDataModel, - Mode: models.PITR, - Status: models.SuccessBackupStatus, - Folder: "artifact_folder", + Name: "artifact_name", + Vendor: "mongodb", + LocationID: locationRes.ID, + ServiceID: *agent.ServiceID, + DataModel: models.LogicalDataModel, + Mode: models.PITR, + Status: models.SuccessBackupStatus, + Folder: "artifact_folder", + Compression: models.Default, }) require.NoError(t, err) @@ -270,14 +272,15 @@ func TestTrimPITRArtifact(t *testing.T) { require.NoError(t, err) artifact, err := models.CreateArtifact(db.Querier, models.CreateArtifactParams{ - Name: "artifact_name", - Vendor: "MongoDB", - LocationID: locationRes.ID, - ServiceID: *agent.ServiceID, - DataModel: models.LogicalDataModel, - Mode: models.PITR, - Status: models.PendingBackupStatus, - Folder: "artifact_folder", + Name: "artifact_name", + Vendor: "MongoDB", + LocationID: locationRes.ID, + ServiceID: *agent.ServiceID, + DataModel: models.LogicalDataModel, + Mode: models.PITR, + Status: models.PendingBackupStatus, + Folder: "artifact_folder", + Compression: models.Default, }) require.NoError(t, err) @@ -397,14 +400,15 @@ func TestLockArtifact(t *testing.T) { require.NoError(t, err) artifact, err := 
models.CreateArtifact(db.Querier, models.CreateArtifactParams{ - Name: "artifact_name", - Vendor: "MongoDB", - LocationID: locationRes.ID, - ServiceID: *agent.ServiceID, - DataModel: models.LogicalDataModel, - Mode: models.PITR, - Status: models.PendingBackupStatus, - Folder: "artifact_folder", + Name: "artifact_name", + Vendor: "MongoDB", + LocationID: locationRes.ID, + ServiceID: *agent.ServiceID, + DataModel: models.LogicalDataModel, + Mode: models.PITR, + Status: models.PendingBackupStatus, + Folder: "artifact_folder", + Compression: models.Default, }) require.NoError(t, err) @@ -493,14 +497,15 @@ func TestReleaseArtifact(t *testing.T) { require.NoError(t, err) artifact, err := models.CreateArtifact(db.Querier, models.CreateArtifactParams{ - Name: "artifact_name", - Vendor: "MongoDB", - LocationID: locationRes.ID, - ServiceID: *agent.ServiceID, - DataModel: models.LogicalDataModel, - Mode: models.PITR, - Status: models.DeletingBackupStatus, - Folder: "artifact_folder", + Name: "artifact_name", + Vendor: "MongoDB", + LocationID: locationRes.ID, + ServiceID: *agent.ServiceID, + DataModel: models.LogicalDataModel, + Mode: models.PITR, + Status: models.DeletingBackupStatus, + Folder: "artifact_folder", + Compression: models.Default, }) require.NoError(t, err) diff --git a/managed/services/backup/retention_service_test.go b/managed/services/backup/retention_service_test.go index 8e176a5f004..75c4253ccdb 100644 --- a/managed/services/backup/retention_service_test.go +++ b/managed/services/backup/retention_service_test.go @@ -100,14 +100,15 @@ func TestEnsureRetention(t *testing.T) { createArtifact := func() { _, err := models.CreateArtifact(db.Querier, models.CreateArtifactParams{ - Name: gofakeit.Name(), - Vendor: "MongoDB", - LocationID: locationRes.ID, - ServiceID: *agent.ServiceID, - DataModel: models.PhysicalDataModel, - Mode: models.Snapshot, - Status: models.SuccessBackupStatus, - ScheduleID: task.ID, + Name: gofakeit.Name(), + Vendor: "MongoDB", + LocationID: locationRes.ID, + ServiceID: *agent.ServiceID, + DataModel: models.PhysicalDataModel, + Mode: models.Snapshot, + Status: models.SuccessBackupStatus, + ScheduleID: task.ID, + Compression: models.Default, }) require.NoError(t, err) } @@ -190,14 +191,15 @@ func TestEnsureRetention(t *testing.T) { t.Run("successful", func(t *testing.T) { artifact, err := models.CreateArtifact(db.Querier, models.CreateArtifactParams{ - Name: gofakeit.Name(), - Vendor: "MongoDB", - LocationID: locationRes.ID, - ServiceID: *agent.ServiceID, - DataModel: models.LogicalDataModel, - Mode: models.PITR, - Status: models.SuccessBackupStatus, - ScheduleID: task.ID, + Name: gofakeit.Name(), + Vendor: "MongoDB", + LocationID: locationRes.ID, + ServiceID: *agent.ServiceID, + DataModel: models.LogicalDataModel, + Mode: models.PITR, + Status: models.SuccessBackupStatus, + ScheduleID: task.ID, + Compression: models.Default, }) require.NoError(t, err) @@ -224,14 +226,15 @@ func TestEnsureRetention(t *testing.T) { t.Run("more than one pitr artifact", func(t *testing.T) { _, err := models.CreateArtifact(db.Querier, models.CreateArtifactParams{ - Name: gofakeit.Name(), - Vendor: "MongoDB", - LocationID: locationRes.ID, - ServiceID: *agent.ServiceID, - DataModel: models.LogicalDataModel, - Mode: models.PITR, - Status: models.SuccessBackupStatus, - ScheduleID: task.ID, + Name: gofakeit.Name(), + Vendor: "MongoDB", + LocationID: locationRes.ID, + ServiceID: *agent.ServiceID, + DataModel: models.LogicalDataModel, + Mode: models.PITR, + Status: models.SuccessBackupStatus, + 
ScheduleID: task.ID, + Compression: models.Default, }) require.NoError(t, err) diff --git a/managed/services/management/backup/backup_service.go b/managed/services/management/backup/backup_service.go index a1c783f2038..72f30bc9682 100644 --- a/managed/services/management/backup/backup_service.go +++ b/managed/services/management/backup/backup_service.go @@ -106,11 +106,20 @@ func (s *BackupService) StartBackup(ctx context.Context, req *backupv1.StartBack return nil, status.Errorf(codes.InvalidArgument, "Invalid data model: %s", req.DataModel.String()) } + compression, err := convertCompressionToBackupCompression(req.Compression) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "Invalid compression: %s", req.Compression.String()) + } + svc, err := models.FindServiceByID(s.db.Querier, req.ServiceId) if err != nil { return nil, err } + if err := compression.ValidateForServiceType(svc.ServiceType); err != nil { + return nil, status.Errorf(codes.InvalidArgument, "Compression validation failed: %v", err) + } + if svc.ServiceType == models.MongoDBServiceType { if svc.Cluster == "" { return nil, status.Errorf(codes.FailedPrecondition, "Service %s must be a member of a cluster", svc.ServiceName) @@ -126,6 +135,7 @@ func (s *BackupService) StartBackup(ctx context.Context, req *backupv1.StartBack Retries: req.Retries, RetryInterval: req.RetryInterval.AsDuration(), Folder: req.Folder, + Compression: compression, }) if err != nil { return nil, convertError(err) @@ -177,6 +187,15 @@ func (s *BackupService) ScheduleBackup(ctx context.Context, req *backupv1.Schedu return status.Errorf(codes.InvalidArgument, "Invalid data model: %s", req.DataModel.String()) } + compression, err := convertCompressionToBackupCompression(req.Compression) + if err != nil { + return status.Errorf(codes.InvalidArgument, "Invalid compression: %s", req.Compression.String()) + } + + if err := compression.ValidateForServiceType(svc.ServiceType); err != nil { + return status.Errorf(codes.InvalidArgument, "Compression validation failed: %v", err) + } + backupParams := &scheduler.BackupTaskParams{ ServiceID: svc.ServiceID, ClusterName: svc.Cluster, @@ -189,6 +208,7 @@ func (s *BackupService) ScheduleBackup(ctx context.Context, req *backupv1.Schedu Retries: req.Retries, RetryInterval: req.RetryInterval.AsDuration(), Folder: req.Folder, + Compression: compression, } var task scheduler.Task @@ -626,6 +646,32 @@ func (s *BackupService) ListPitrTimeranges(ctx context.Context, req *backupv1.Li }, nil } +// ListServiceCompression returns available compression methods for a service. 
+func (s *BackupService) ListServiceCompression(_ context.Context, req *backupv1.ListServiceCompressionRequest) (*backupv1.ListServiceCompressionResponse, error) { + svc, err := models.FindServiceByID(s.db.Querier, req.ServiceId) + if err != nil { + return nil, err + } + + supportedCompressions := models.GetSupportedCompressions(svc.ServiceType) + if supportedCompressions == nil { + return nil, status.Errorf(codes.Unimplemented, "backup compression is not yet supported for service type: %s", svc.ServiceType) + } + + compressionMethods := make([]backupv1.BackupCompression, 0, len(supportedCompressions)) + for _, compression := range supportedCompressions { + protoCompression, err := convertBackupCompression(compression) + if err != nil { + return nil, err + } + compressionMethods = append(compressionMethods, protoCompression) + } + + return &backupv1.ListServiceCompressionResponse{ + CompressionMethods: compressionMethods, + }, nil +} + func convertTaskToScheduledBackup(task *models.ScheduledTask, services map[string]*models.Service, locationModels map[string]*models.BackupLocation, @@ -675,6 +721,10 @@ func convertTaskToScheduledBackup(task *models.ScheduledTask, return nil, err } + if scheduledBackup.Compression, err = convertBackupCompression(commonBackupData.Compression); err != nil { + return nil, err + } + if commonBackupData.RetryInterval > 0 { scheduledBackup.RetryInterval = durationpb.New(commonBackupData.RetryInterval) } @@ -725,6 +775,31 @@ func convertModelToBackupModel(dataModel backupv1.DataModel) (models.DataModel, } } +func convertCompressionToBackupCompression(compression backupv1.BackupCompression) (models.BackupCompression, error) { + switch compression { + case backupv1.BackupCompression_BACKUP_COMPRESSION_QUICKLZ: + return models.QuickLZ, nil + case backupv1.BackupCompression_BACKUP_COMPRESSION_ZSTD: + return models.ZSTD, nil + case backupv1.BackupCompression_BACKUP_COMPRESSION_LZ4: + return models.LZ4, nil + case backupv1.BackupCompression_BACKUP_COMPRESSION_S2: + return models.S2, nil + case backupv1.BackupCompression_BACKUP_COMPRESSION_GZIP: + return models.GZIP, nil + case backupv1.BackupCompression_BACKUP_COMPRESSION_SNAPPY: + return models.Snappy, nil + case backupv1.BackupCompression_BACKUP_COMPRESSION_PGZIP: + return models.PGZIP, nil + case backupv1.BackupCompression_BACKUP_COMPRESSION_NONE: + return models.None, nil + case backupv1.BackupCompression_BACKUP_COMPRESSION_DEFAULT: + return models.Default, nil + default: + return "", errors.Errorf("unknown backup compression: %s", compression) + } +} + // convertError converts error from Go to API. 
func convertError(e error) error { if e == nil { @@ -844,6 +919,31 @@ func convertBackupStatus(status models.BackupStatus) (backupv1.BackupStatus, err } } +func convertBackupCompression(compression models.BackupCompression) (backupv1.BackupCompression, error) { + switch compression { + case models.QuickLZ: + return backupv1.BackupCompression_BACKUP_COMPRESSION_QUICKLZ, nil + case models.ZSTD: + return backupv1.BackupCompression_BACKUP_COMPRESSION_ZSTD, nil + case models.LZ4: + return backupv1.BackupCompression_BACKUP_COMPRESSION_LZ4, nil + case models.S2: + return backupv1.BackupCompression_BACKUP_COMPRESSION_S2, nil + case models.GZIP: + return backupv1.BackupCompression_BACKUP_COMPRESSION_GZIP, nil + case models.Snappy: + return backupv1.BackupCompression_BACKUP_COMPRESSION_SNAPPY, nil + case models.PGZIP: + return backupv1.BackupCompression_BACKUP_COMPRESSION_PGZIP, nil + case models.Default: + return backupv1.BackupCompression_BACKUP_COMPRESSION_DEFAULT, nil + case models.None: + return backupv1.BackupCompression_BACKUP_COMPRESSION_NONE, nil + default: + return 0, errors.Errorf("invalid compression '%s'", compression) + } +} + func convertArtifact( a *models.Artifact, services map[string]*models.Service, @@ -880,6 +980,11 @@ func convertArtifact( return nil, errors.Wrapf(err, "artifact id '%s'", a.ID) } + compression, err := convertBackupCompression(a.Compression) + if err != nil { + return nil, errors.Wrapf(err, "artifact id '%s'", a.ID) + } + return &backupv1.Artifact{ ArtifactId: a.ID, Name: a.Name, @@ -895,6 +1000,7 @@ func convertArtifact( IsShardedCluster: a.IsShardedCluster, Folder: a.Folder, MetadataList: artifactMetadataListToProto(a), + Compression: compression, }, nil } diff --git a/managed/services/management/backup/backup_service_test.go b/managed/services/management/backup/backup_service_test.go index e904e09d1a5..bfa252cf3fd 100644 --- a/managed/services/management/backup/backup_service_test.go +++ b/managed/services/management/backup/backup_service_test.go @@ -18,6 +18,7 @@ package backup import ( "context" "fmt" + "slices" "testing" "time" @@ -121,6 +122,7 @@ func TestStartBackup(t *testing.T) { DataModel: backupv1.DataModel_DATA_MODEL_PHYSICAL, RetryInterval: nil, Retries: 0, + Compression: backupv1.BackupCompression_BACKUP_COMPRESSION_ZSTD, }) assert.Nil(t, resp) st, ok := status.FromError(err) @@ -133,6 +135,61 @@ func TestStartBackup(t *testing.T) { assert.Equal(t, tc.code, detailedError.Code) }) } + + t.Run("compression test cases", func(t *testing.T) { + compressionTests := []struct { + name string + compression backupv1.BackupCompression + shouldError bool + }{ + { + name: "QuickLZ compression", + compression: backupv1.BackupCompression_BACKUP_COMPRESSION_QUICKLZ, + shouldError: false, + }, + { + name: "ZSTD compression", + compression: backupv1.BackupCompression_BACKUP_COMPRESSION_ZSTD, + shouldError: false, + }, + { + name: "LZ4 compression", + compression: backupv1.BackupCompression_BACKUP_COMPRESSION_LZ4, + shouldError: false, + }, + { + name: "None compression", + compression: backupv1.BackupCompression_BACKUP_COMPRESSION_NONE, + shouldError: false, + }, + } + + for _, tc := range compressionTests { + t.Run(tc.name, func(t *testing.T) { + backupService.On("PerformBackup", mock.Anything, mock.Anything). 
+ Return("artifact_id", nil).Once() + ctx := context.Background() + resp, err := backupSvc.StartBackup(ctx, &backupv1.StartBackupRequest{ + ServiceId: *agent.ServiceID, + LocationId: "locationID", + Name: "name", + Description: "description", + DataModel: backupv1.DataModel_DATA_MODEL_PHYSICAL, + RetryInterval: nil, + Retries: 0, + Compression: tc.compression, + }) + if tc.shouldError { + assert.Error(t, err) + assert.Nil(t, resp) + } else { + assert.NoError(t, err) + assert.NotNil(t, resp) + assert.Equal(t, "artifact_id", resp.ArtifactId) + } + }) + } + }) }) t.Run("mongodb", func(t *testing.T) { @@ -169,10 +226,80 @@ func TestStartBackup(t *testing.T) { RetryInterval: nil, Retries: 0, DataModel: backupv1.DataModel_DATA_MODEL_PHYSICAL, + Compression: backupv1.BackupCompression_BACKUP_COMPRESSION_S2, }) require.NoError(t, err) }) + t.Run("mongodb compression test cases", func(t *testing.T) { + compressionTests := []struct { + name string + compression backupv1.BackupCompression + shouldError bool + }{ + { + name: "GZIP compression", + compression: backupv1.BackupCompression_BACKUP_COMPRESSION_GZIP, + shouldError: false, + }, + { + name: "Snappy compression", + compression: backupv1.BackupCompression_BACKUP_COMPRESSION_SNAPPY, + shouldError: false, + }, + { + name: "LZ4 compression", + compression: backupv1.BackupCompression_BACKUP_COMPRESSION_LZ4, + shouldError: false, + }, + { + name: "S2 compression", + compression: backupv1.BackupCompression_BACKUP_COMPRESSION_S2, + shouldError: false, + }, + { + name: "PGZIP compression", + compression: backupv1.BackupCompression_BACKUP_COMPRESSION_PGZIP, + shouldError: false, + }, + { + name: "ZSTD compression", + compression: backupv1.BackupCompression_BACKUP_COMPRESSION_ZSTD, + shouldError: false, + }, + { + name: "None compression", + compression: backupv1.BackupCompression_BACKUP_COMPRESSION_NONE, + shouldError: false, + }, + } + + for _, tc := range compressionTests { + t.Run(tc.name, func(t *testing.T) { + ctx := context.Background() + backupService := &mockBackupService{} + mockedPbmPITRService := &mockPbmPITRService{} + backupSvc := NewBackupsService(db, backupService, nil, nil, nil, mockedPbmPITRService) + backupService.On("PerformBackup", mock.Anything, mock.Anything).Return("artifact_id", nil) + _, err := backupSvc.StartBackup(ctx, &backupv1.StartBackupRequest{ + ServiceId: *agent.ServiceID, + LocationId: locationRes.ID, + Name: "name", + Description: "description", + RetryInterval: nil, + Retries: 0, + DataModel: backupv1.DataModel_DATA_MODEL_PHYSICAL, + Compression: tc.compression, + }) + if tc.shouldError { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } + }) + t.Run("check folder and artifact name", func(t *testing.T) { ctx := context.Background() backupService := &mockBackupService{} @@ -230,10 +357,11 @@ func TestStartBackup(t *testing.T) { backupService.On("PerformBackup", mock.Anything, mock.Anything).Return("", nil).Once() } res, err := backupSvc.StartBackup(ctx, &backupv1.StartBackupRequest{ - Name: test.BackupName, - Folder: test.Folder, - ServiceId: *agent.ServiceID, - DataModel: backupv1.DataModel_DATA_MODEL_LOGICAL, + Name: test.BackupName, + Folder: test.Folder, + ServiceId: *agent.ServiceID, + DataModel: backupv1.DataModel_DATA_MODEL_LOGICAL, + Compression: backupv1.BackupCompression_BACKUP_COMPRESSION_S2, }) if test.ErrString != "" { assert.Nil(t, res) @@ -291,6 +419,7 @@ func TestScheduledBackups(t *testing.T) { Enabled: true, Mode: backupv1.BackupMode_BACKUP_MODE_SNAPSHOT, DataModel: 
backupv1.DataModel_DATA_MODEL_PHYSICAL, + Compression: backupv1.BackupCompression_BACKUP_COMPRESSION_ZSTD, Retries: maxRetriesAttempts - 1, RetryInterval: durationpb.New(maxRetryInterval), } @@ -346,21 +475,22 @@ func TestScheduledBackups(t *testing.T) { task, err := models.CreateScheduledTask(db.Querier, models.CreateScheduledTaskParams{ CronExpression: "* * * * *", Type: models.ScheduledMySQLBackupTask, - Data: &models.ScheduledTaskData{MySQLBackupTask: &models.MySQLBackupTaskData{CommonBackupTaskData: models.CommonBackupTaskData{Name: t.Name()}}}, + Data: &models.ScheduledTaskData{MySQLBackupTask: &models.MySQLBackupTaskData{CommonBackupTaskData: models.CommonBackupTaskData{Name: t.Name(), Compression: models.ZSTD}}}, }) require.NoError(t, err) id := task.ID _, err = models.CreateArtifact(db.Querier, models.CreateArtifactParams{ - Name: "artifact", - Vendor: "mysql", - LocationID: locationRes.ID, - ServiceID: *agent.ServiceID, - DataModel: models.PhysicalDataModel, - Mode: models.Snapshot, - Status: models.PendingBackupStatus, - ScheduleID: id, + Name: "artifact", + Vendor: "mysql", + LocationID: locationRes.ID, + ServiceID: *agent.ServiceID, + DataModel: models.PhysicalDataModel, + Mode: models.Snapshot, + Status: models.PendingBackupStatus, + ScheduleID: id, + Compression: models.ZSTD, }) require.NoError(t, err) @@ -401,6 +531,7 @@ func TestScheduledBackups(t *testing.T) { Retries: maxRetriesAttempts, DataModel: backupv1.DataModel_DATA_MODEL_PHYSICAL, Mode: backupv1.BackupMode_BACKUP_MODE_PITR, + Compression: backupv1.BackupCompression_BACKUP_COMPRESSION_S2, }) require.Error(t, err) tests.AssertGRPCErrorRE(t, codes.InvalidArgument, "PITR is only supported for logical backups", err) @@ -421,6 +552,7 @@ func TestScheduledBackups(t *testing.T) { Retries: maxRetriesAttempts, DataModel: backupv1.DataModel_DATA_MODEL_PHYSICAL, Mode: backupv1.BackupMode_BACKUP_MODE_SNAPSHOT, + Compression: backupv1.BackupCompression_BACKUP_COMPRESSION_S2, }) require.NoError(t, err) }) @@ -551,13 +683,14 @@ func TestListPitrTimeranges(t *testing.T) { t.Run("successfully lists PITR time ranges", func(t *testing.T) { artifact, err := models.CreateArtifact(db.Querier, models.CreateArtifactParams{ - Name: "test_artifact", - Vendor: "test_vendor", - LocationID: locationID, - ServiceID: "test_service", - Mode: models.PITR, - DataModel: models.LogicalDataModel, - Status: models.PendingBackupStatus, + Name: "test_artifact", + Vendor: "test_vendor", + LocationID: locationID, + ServiceID: "test_service", + Mode: models.PITR, + DataModel: models.LogicalDataModel, + Status: models.PendingBackupStatus, + Compression: models.S2, }) assert.NoError(t, err) assert.NotEmpty(t, artifact.ID) @@ -581,13 +714,14 @@ func TestListPitrTimeranges(t *testing.T) { t.Run("fails for non-PITR artifact", func(t *testing.T) { artifact, err := models.CreateArtifact(db.Querier, models.CreateArtifactParams{ - Name: "test_non_pitr_artifact", - Vendor: "test_vendor", - LocationID: locationID, - ServiceID: "test_service", - Mode: models.Snapshot, - DataModel: models.LogicalDataModel, - Status: models.PendingBackupStatus, + Name: "test_non_pitr_artifact", + Vendor: "test_vendor", + LocationID: locationID, + ServiceID: "test_service", + Mode: models.Snapshot, + DataModel: models.LogicalDataModel, + Status: models.PendingBackupStatus, + Compression: models.ZSTD, }) assert.NoError(t, err) assert.NotEmpty(t, artifact.ID) @@ -601,6 +735,188 @@ func TestListPitrTimeranges(t *testing.T) { mock.AssertExpectationsForObjects(t, mockedPbmPITRService) } +func 
@@ -601,6 +735,188 @@ func TestListPitrTimeranges(t *testing.T) {
 	mock.AssertExpectationsForObjects(t, mockedPbmPITRService)
 }
 
+func TestConvertBackupCompression(t *testing.T) {
+	tests := []struct {
+		name        string
+		compression models.BackupCompression
+		expected    backupv1.BackupCompression
+		shouldError bool
+	}{
+		{
+			name:        "QuickLZ compression",
+			compression: models.QuickLZ,
+			expected:    backupv1.BackupCompression_BACKUP_COMPRESSION_QUICKLZ,
+			shouldError: false,
+		},
+		{
+			name:        "ZSTD compression",
+			compression: models.ZSTD,
+			expected:    backupv1.BackupCompression_BACKUP_COMPRESSION_ZSTD,
+			shouldError: false,
+		},
+		{
+			name:        "LZ4 compression",
+			compression: models.LZ4,
+			expected:    backupv1.BackupCompression_BACKUP_COMPRESSION_LZ4,
+			shouldError: false,
+		},
+		{
+			name:        "S2 compression",
+			compression: models.S2,
+			expected:    backupv1.BackupCompression_BACKUP_COMPRESSION_S2,
+			shouldError: false,
+		},
+		{
+			name:        "GZIP compression",
+			compression: models.GZIP,
+			expected:    backupv1.BackupCompression_BACKUP_COMPRESSION_GZIP,
+			shouldError: false,
+		},
+		{
+			name:        "Snappy compression",
+			compression: models.Snappy,
+			expected:    backupv1.BackupCompression_BACKUP_COMPRESSION_SNAPPY,
+			shouldError: false,
+		},
+		{
+			name:        "PGZIP compression",
+			compression: models.PGZIP,
+			expected:    backupv1.BackupCompression_BACKUP_COMPRESSION_PGZIP,
+			shouldError: false,
+		},
+		{
+			name:        "None compression",
+			compression: models.None,
+			expected:    backupv1.BackupCompression_BACKUP_COMPRESSION_NONE,
+			shouldError: false,
+		},
+		{
+			name:        "invalid compression",
+			compression: "invalid",
+			expected:    0,
+			shouldError: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result, err := convertBackupCompression(tt.compression)
+			if tt.shouldError {
+				assert.Error(t, err)
+				assert.Equal(t, backupv1.BackupCompression_BACKUP_COMPRESSION_UNSPECIFIED, result)
+			} else {
+				assert.NoError(t, err)
+				assert.Equal(t, tt.expected, result)
+			}
+		})
+	}
+}
+
+func TestListServiceCompression(t *testing.T) {
+	tests := []struct {
+		name          string
+		serviceType   models.ServiceType
+		expectedCount int
+		expectedError bool
+		errorContains string
+	}{
+		{
+			name:          "MySQL service type",
+			serviceType:   models.MySQLServiceType,
+			expectedCount: len(models.GetSupportedCompressions(models.MySQLServiceType)),
+			expectedError: false,
+		},
+		{
+			name:          "MongoDB service type",
+			serviceType:   models.MongoDBServiceType,
+			expectedCount: len(models.GetSupportedCompressions(models.MongoDBServiceType)),
+			expectedError: false,
+		},
+		{
+			name:          "PostgreSQL service type",
+			serviceType:   models.PostgreSQLServiceType,
+			expectedCount: 0,
+			expectedError: true,
+			errorContains: "not yet supported",
+		},
+		{
+			name:          "Unknown service type",
+			serviceType:   "unknown",
+			expectedCount: 0,
+			expectedError: true,
+			errorContains: "not yet supported",
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			supportedCompressions := models.GetSupportedCompressions(tt.serviceType)
+
+			if tt.expectedError {
+				assert.Nil(t, supportedCompressions)
+			} else {
+				assert.NotNil(t, supportedCompressions)
+				assert.Len(t, supportedCompressions, tt.expectedCount)
+			}
+		})
+	}
+}
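Note: TestListServiceCompression above only pins down that MySQL and MongoDB yield non-nil slices while PostgreSQL and unknown service types yield nil; TestCompressionValidation below additionally fixes QuickLZ-in/GZIP-out for MySQL and GZIP-in/QuickLZ-out for MongoDB. A sketch of `GetSupportedCompressions` consistent with those assertions; the full membership of each slice beyond the pinned values is an assumption, loosely modeled on what xtrabackup and pbm offer:

```go
package models

// GetSupportedCompressions lists the compression algorithms a service type
// supports; nil means backup compression is not yet supported for it.
// Set membership beyond what the tests pin down is an assumption.
func GetSupportedCompressions(serviceType ServiceType) []BackupCompression {
	switch serviceType {
	case MySQLServiceType: // xtrabackup-style set; notably excludes GZIP
		return []BackupCompression{QuickLZ, LZ4, ZSTD}
	case MongoDBServiceType: // pbm-style set; notably excludes QuickLZ
		return []BackupCompression{GZIP, Snappy, LZ4, S2, PGZIP, ZSTD, None}
	default: // PostgreSQL and unknown service types
		return nil
	}
}
```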
with GZIP", + serviceType: models.MongoDBServiceType, + compression: models.GZIP, + expectedValid: true, + }, + { + name: "MongoDB with QuickLZ (not supported)", + serviceType: models.MongoDBServiceType, + compression: models.QuickLZ, + expectedValid: false, + }, + { + name: "PostgreSQL with any compression", + serviceType: models.PostgreSQLServiceType, + compression: models.GZIP, + expectedValid: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + supported := models.GetSupportedCompressions(tt.serviceType) + isSupported := slices.Contains(supported, tt.compression) + assert.Equal(t, tt.expectedValid, isSupported) + + err := tt.compression.ValidateForServiceType(tt.serviceType) + if tt.expectedValid { + assert.NoError(t, err) + } else { + assert.Error(t, err) + } + }) + } +} + func TestArtifactMetadataListToProto(t *testing.T) { sqlDB := testdb.Open(t, models.SkipFixtures, nil) t.Cleanup(func() { @@ -625,13 +941,14 @@ func TestArtifactMetadataListToProto(t *testing.T) { require.NotEmpty(t, loc.ID) artifact, err := models.CreateArtifact(db.Querier, models.CreateArtifactParams{ - Name: "test_artifact", - Vendor: "test_vendor", - LocationID: loc.ID, - ServiceID: "test_service", - Mode: models.PITR, - DataModel: models.LogicalDataModel, - Status: models.PendingBackupStatus, + Name: "test_artifact", + Vendor: "test_vendor", + LocationID: loc.ID, + ServiceID: "test_service", + Mode: models.PITR, + DataModel: models.LogicalDataModel, + Status: models.PendingBackupStatus, + Compression: models.S2, }) assert.NoError(t, err) @@ -678,3 +995,80 @@ func TestArtifactMetadataListToProto(t *testing.T) { assert.Equal(t, expected, actual) } + +func TestConvertCompressionToBackupCompression(t *testing.T) { + tests := []struct { + name string + compression backupv1.BackupCompression + expected models.BackupCompression + shouldError bool + }{ + { + name: "QuickLZ compression", + compression: backupv1.BackupCompression_BACKUP_COMPRESSION_QUICKLZ, + expected: models.QuickLZ, + shouldError: false, + }, + { + name: "ZSTD compression", + compression: backupv1.BackupCompression_BACKUP_COMPRESSION_ZSTD, + expected: models.ZSTD, + shouldError: false, + }, + { + name: "LZ4 compression", + compression: backupv1.BackupCompression_BACKUP_COMPRESSION_LZ4, + expected: models.LZ4, + shouldError: false, + }, + { + name: "S2 compression", + compression: backupv1.BackupCompression_BACKUP_COMPRESSION_S2, + expected: models.S2, + shouldError: false, + }, + { + name: "GZIP compression", + compression: backupv1.BackupCompression_BACKUP_COMPRESSION_GZIP, + expected: models.GZIP, + shouldError: false, + }, + { + name: "Snappy compression", + compression: backupv1.BackupCompression_BACKUP_COMPRESSION_SNAPPY, + expected: models.Snappy, + shouldError: false, + }, + { + name: "PGZIP compression", + compression: backupv1.BackupCompression_BACKUP_COMPRESSION_PGZIP, + expected: models.PGZIP, + shouldError: false, + }, + { + name: "None compression", + compression: backupv1.BackupCompression_BACKUP_COMPRESSION_NONE, + expected: models.None, + shouldError: false, + }, + { + name: "invalid compression", + compression: backupv1.BackupCompression_BACKUP_COMPRESSION_UNSPECIFIED, + expected: models.BackupCompression(""), + shouldError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + result, err := convertCompressionToBackupCompression(tt.compression) + if tt.shouldError { + assert.Error(t, err) + assert.Equal(t, models.BackupCompression(""), result) + } else { + 
@@ -678,3 +995,80 @@
 
 	assert.Equal(t, expected, actual)
 }
+
+func TestConvertCompressionToBackupCompression(t *testing.T) {
+	tests := []struct {
+		name        string
+		compression backupv1.BackupCompression
+		expected    models.BackupCompression
+		shouldError bool
+	}{
+		{
+			name:        "QuickLZ compression",
+			compression: backupv1.BackupCompression_BACKUP_COMPRESSION_QUICKLZ,
+			expected:    models.QuickLZ,
+			shouldError: false,
+		},
+		{
+			name:        "ZSTD compression",
+			compression: backupv1.BackupCompression_BACKUP_COMPRESSION_ZSTD,
+			expected:    models.ZSTD,
+			shouldError: false,
+		},
+		{
+			name:        "LZ4 compression",
+			compression: backupv1.BackupCompression_BACKUP_COMPRESSION_LZ4,
+			expected:    models.LZ4,
+			shouldError: false,
+		},
+		{
+			name:        "S2 compression",
+			compression: backupv1.BackupCompression_BACKUP_COMPRESSION_S2,
+			expected:    models.S2,
+			shouldError: false,
+		},
+		{
+			name:        "GZIP compression",
+			compression: backupv1.BackupCompression_BACKUP_COMPRESSION_GZIP,
+			expected:    models.GZIP,
+			shouldError: false,
+		},
+		{
+			name:        "Snappy compression",
+			compression: backupv1.BackupCompression_BACKUP_COMPRESSION_SNAPPY,
+			expected:    models.Snappy,
+			shouldError: false,
+		},
+		{
+			name:        "PGZIP compression",
+			compression: backupv1.BackupCompression_BACKUP_COMPRESSION_PGZIP,
+			expected:    models.PGZIP,
+			shouldError: false,
+		},
+		{
+			name:        "None compression",
+			compression: backupv1.BackupCompression_BACKUP_COMPRESSION_NONE,
+			expected:    models.None,
+			shouldError: false,
+		},
+		{
+			name:        "invalid compression",
+			compression: backupv1.BackupCompression_BACKUP_COMPRESSION_UNSPECIFIED,
+			expected:    models.BackupCompression(""),
+			shouldError: true,
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			result, err := convertCompressionToBackupCompression(tt.compression)
+			if tt.shouldError {
+				assert.Error(t, err)
+				assert.Equal(t, models.BackupCompression(""), result)
+			} else {
+				assert.NoError(t, err)
+				assert.Equal(t, tt.expected, result)
+			}
+		})
+	}
+}
diff --git a/managed/services/management/backup/restore_service.go b/managed/services/management/backup/restore_service.go
index 6d75311440c..5bd179f4e52 100644
--- a/managed/services/management/backup/restore_service.go
+++ b/managed/services/management/backup/restore_service.go
@@ -311,6 +311,11 @@ func convertRestoreHistoryItem(
 		return nil, errors.Wrapf(err, "restore history item id '%s'", i.ID)
 	}
 
+	compression, err := convertBackupCompression(artifact.Compression)
+	if err != nil {
+		return nil, errors.Wrapf(err, "restore history item id '%s'", i.ID)
+	}
+
 	return &backupv1.RestoreHistoryItem{
 		RestoreId:  i.ID,
 		ArtifactId: i.ArtifactID,
@@ -325,6 +330,7 @@
 		StartedAt:     startedAt,
 		FinishedAt:    finishedAt,
 		PitrTimestamp: pitrTimestamp,
+		Compression:   compression,
 	}, nil
 }
diff --git a/managed/services/preconditions_test.go b/managed/services/preconditions_test.go
index f4a9a5a729d..6d248d9a232 100644
--- a/managed/services/preconditions_test.go
+++ b/managed/services/preconditions_test.go
@@ -242,14 +242,15 @@ func TestCheckArtifactOverlapping(t *testing.T) {
 	require.NoError(t, err)
 
 	_, err = models.CreateArtifact(db.Querier, models.CreateArtifactParams{
-		Name:       "test_artifact",
-		Vendor:     "mysql",
-		LocationID: location.ID,
-		ServiceID:  mysqlSvc1.ServiceID,
-		DataModel:  models.LogicalDataModel,
-		Mode:       models.Snapshot,
-		Status:     models.SuccessBackupStatus,
-		Folder:     folder2,
+		Name:        "test_artifact",
+		Vendor:      "mysql",
+		LocationID:  location.ID,
+		ServiceID:   mysqlSvc1.ServiceID,
+		DataModel:   models.LogicalDataModel,
+		Mode:        models.Snapshot,
+		Status:      models.SuccessBackupStatus,
+		Folder:      folder2,
+		Compression: models.Default,
 	})
 	require.NoError(t, err)
diff --git a/managed/services/scheduler/scheduler.go b/managed/services/scheduler/scheduler.go
index 842987bbfb9..763e8c83c8f 100644
--- a/managed/services/scheduler/scheduler.go
+++ b/managed/services/scheduler/scheduler.go
@@ -322,6 +322,7 @@ func (s *Service) convertDBTask(dbTask *models.ScheduledTask) (Task, error) { //
 				Retries:       data.Retries,
 				RetryInterval: data.RetryInterval,
 				Folder:        data.Folder,
+				Compression:   data.Compression,
 			},
 		}
 	case models.ScheduledMongoDBBackupTask:
@@ -341,6 +342,7 @@ func (s *Service) convertDBTask(dbTask *models.ScheduledTask) (Task, error) { //
 				Retries:       data.Retries,
 				RetryInterval: data.RetryInterval,
 				Folder:        data.Folder,
+				Compression:   data.Compression,
 			},
 		}
diff --git a/managed/services/scheduler/scheduler_test.go b/managed/services/scheduler/scheduler_test.go
index cdf5d4f6fa9..336f2fa180e 100644
--- a/managed/services/scheduler/scheduler_test.go
+++ b/managed/services/scheduler/scheduler_test.go
@@ -93,6 +93,7 @@ func TestService(t *testing.T) {
 			Retention:     7,
 			Retries:       3,
 			RetryInterval: 5 * time.Second,
+			Compression:   models.Default,
 		})
 		require.NoError(t, err)
 
@@ -120,6 +121,7 @@ func TestService(t *testing.T) {
 			Retention:     7,
 			Retries:       3,
 			RetryInterval: 5 * time.Second,
+			Compression:   models.Default,
 		})
 		require.NoError(t, err)
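Note: `convertRestoreHistoryItem` above depends on `convertBackupCompression`, whose models-to-proto mapping is fixed case by case in TestConvertBackupCompression earlier in this diff. A sketch consistent with that table; the `models.Default` arm is not exercised by the test and is assumed, as are the package name and import paths:

```go
package backup

import (
	"github.com/pkg/errors"

	backupv1 "github.com/percona/pmm/api/backup/v1" // import path assumed

	"github.com/percona/pmm/managed/models"
)

// convertBackupCompression maps the models-level compression onto the proto
// enum; unknown values yield UNSPECIFIED plus an error, per the test.
func convertBackupCompression(c models.BackupCompression) (backupv1.BackupCompression, error) {
	switch c {
	case models.Default: // not covered by the test table; mapping assumed
		return backupv1.BackupCompression_BACKUP_COMPRESSION_DEFAULT, nil
	case models.QuickLZ:
		return backupv1.BackupCompression_BACKUP_COMPRESSION_QUICKLZ, nil
	case models.ZSTD:
		return backupv1.BackupCompression_BACKUP_COMPRESSION_ZSTD, nil
	case models.LZ4:
		return backupv1.BackupCompression_BACKUP_COMPRESSION_LZ4, nil
	case models.S2:
		return backupv1.BackupCompression_BACKUP_COMPRESSION_S2, nil
	case models.GZIP:
		return backupv1.BackupCompression_BACKUP_COMPRESSION_GZIP, nil
	case models.Snappy:
		return backupv1.BackupCompression_BACKUP_COMPRESSION_SNAPPY, nil
	case models.PGZIP:
		return backupv1.BackupCompression_BACKUP_COMPRESSION_PGZIP, nil
	case models.None:
		return backupv1.BackupCompression_BACKUP_COMPRESSION_NONE, nil
	default:
		return backupv1.BackupCompression_BACKUP_COMPRESSION_UNSPECIFIED, errors.Errorf("invalid compression: '%s'", c)
	}
}
```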
diff --git a/managed/services/scheduler/task.go b/managed/services/scheduler/task.go
index 67ccc946435..706a25c15ff 100644
--- a/managed/services/scheduler/task.go
+++ b/managed/services/scheduler/task.go
@@ -55,6 +55,7 @@ type BackupTaskParams struct {
 	Retries       uint32
 	RetryInterval time.Duration
 	Folder        string
+	Compression   models.BackupCompression
 }
 
 // Validate checks backup task parameters for correctness.
@@ -75,6 +76,10 @@ func (p *BackupTaskParams) Validate() error {
 		return err
 	}
 
+	if err := p.Compression.Validate(); err != nil {
+		return err
+	}
+
 	return p.Mode.Validate()
 }
@@ -113,6 +118,7 @@ func (t *mySQLBackupTask) Run(ctx context.Context, scheduler *Service) error {
 		Retries:       t.Retries,
 		RetryInterval: t.RetryInterval,
 		Folder:        t.Folder,
+		Compression:   t.Compression,
 	})
 	return err
 }
@@ -136,6 +142,7 @@ func (t *mySQLBackupTask) Data() *models.ScheduledTaskData {
 				Retries:       t.Retries,
 				RetryInterval: t.RetryInterval,
 				Folder:        t.Folder,
+				Compression:   t.Compression,
 			},
 		},
 	}
@@ -176,6 +183,7 @@ func (t *mongoDBBackupTask) Run(ctx context.Context, scheduler *Service) error {
 		Retries:       t.Retries,
 		RetryInterval: t.RetryInterval,
 		Folder:        t.Folder,
+		Compression:   t.Compression,
 	})
 	return err
 }
@@ -199,6 +207,7 @@ func (t *mongoDBBackupTask) Data() *models.ScheduledTaskData {
 				Retries:       t.Retries,
 				RetryInterval: t.RetryInterval,
 				Folder:        t.Folder,
+				Compression:   t.Compression,
 			},
 		},
 	}
diff --git a/managed/services/scheduler/task_test.go b/managed/services/scheduler/task_test.go
index 41e726e8cf4..5257be3b42c 100644
--- a/managed/services/scheduler/task_test.go
+++ b/managed/services/scheduler/task_test.go
@@ -38,121 +38,132 @@ func TestValidation(t *testing.T) {
 		{
 			name: "normal",
 			params: &BackupTaskParams{
-				ServiceID:  "service-id",
-				LocationID: "location-id",
-				Name:       "name",
-				DataModel:  models.PhysicalDataModel,
-				Mode:       models.Snapshot,
+				ServiceID:   "service-id",
+				LocationID:  "location-id",
+				Name:        "name",
+				DataModel:   models.PhysicalDataModel,
+				Mode:        models.Snapshot,
+				Compression: models.Default,
 			},
 			errMsg: "",
 		},
 		{
 			name: "empty name",
 			params: &BackupTaskParams{
-				ServiceID:  "service-id",
-				LocationID: "location-id",
-				Name:       "",
-				DataModel:  models.PhysicalDataModel,
-				Mode:       models.Snapshot,
+				ServiceID:   "service-id",
+				LocationID:  "location-id",
+				Name:        "",
+				DataModel:   models.PhysicalDataModel,
+				Mode:        models.Snapshot,
+				Compression: models.Default,
 			},
 			errMsg: "backup name can't be empty",
 		},
 		{
 			name: "empty serviceID",
 			params: &BackupTaskParams{
-				ServiceID:  "",
-				LocationID: "location-id",
-				Name:       "name",
-				DataModel:  models.PhysicalDataModel,
-				Mode:       models.Snapshot,
+				ServiceID:   "",
+				LocationID:  "location-id",
+				Name:        "name",
+				DataModel:   models.PhysicalDataModel,
+				Mode:        models.Snapshot,
+				Compression: models.Default,
 			},
 			errMsg: "service id can't be empty",
 		},
 		{
 			name: "empty locationId",
 			params: &BackupTaskParams{
-				ServiceID:  "service-id",
-				LocationID: "",
-				Name:       "name",
-				DataModel:  models.PhysicalDataModel,
-				Mode:       models.Snapshot,
+				ServiceID:   "service-id",
+				LocationID:  "",
+				Name:        "name",
+				DataModel:   models.PhysicalDataModel,
+				Mode:        models.Snapshot,
+				Compression: models.Default,
 			},
 			errMsg: "location id can't be empty",
 		},
 		{
 			name: "empty data model",
 			params: &BackupTaskParams{
-				ServiceID:  "service-id",
-				LocationID: "location-id",
-				Name:       "name",
-				DataModel:  "",
-				Mode:       models.Snapshot,
+				ServiceID:   "service-id",
+				LocationID:  "location-id",
+				Name:        "name",
+				DataModel:   "",
+				Mode:        models.Snapshot,
+				Compression: models.Default,
 			},
 			errMsg: "invalid argument: empty data model",
 		},
 		{
 			name: "empty mode",
 			params: &BackupTaskParams{
-				ServiceID:  "service-id",
-				LocationID: "location-id",
-				Name:       "name",
-				DataModel:  models.PhysicalDataModel,
-				Mode:       "",
+				ServiceID:   "service-id",
+				LocationID:  "location-id",
+				Name:        "name",
+				DataModel:   models.PhysicalDataModel,
+				Mode:        "",
+				Compression: models.Default,
 			},
 			errMsg: "invalid argument: empty backup mode",
 		},
 		{
 			name: "invalid data model",
model", params: &BackupTaskParams{ - ServiceID: "service-id", - LocationID: "location-id", - Name: "name", - DataModel: "invalid", - Mode: models.Snapshot, + ServiceID: "service-id", + LocationID: "location-id", + Name: "name", + DataModel: "invalid", + Mode: models.Snapshot, + Compression: models.Default, }, errMsg: "invalid argument: invalid data model 'invalid'", }, { name: "invalid backup mode", params: &BackupTaskParams{ - ServiceID: "service-id", - LocationID: "location-id", - Name: "name", - DataModel: models.PhysicalDataModel, - Mode: "invalid", + ServiceID: "service-id", + LocationID: "location-id", + Name: "name", + DataModel: models.PhysicalDataModel, + Mode: "invalid", + Compression: models.Default, }, errMsg: "invalid argument: invalid backup mode 'invalid'", }, { name: "unsupported data model", params: &BackupTaskParams{ - ServiceID: "service-id", - LocationID: "location-id", - Name: "name", - DataModel: models.LogicalDataModel, - Mode: models.Snapshot, + ServiceID: "service-id", + LocationID: "location-id", + Name: "name", + DataModel: models.LogicalDataModel, + Mode: models.Snapshot, + Compression: models.Default, }, errMsg: "unsupported backup data model for mySQL: logical", }, { name: "unsupported incremental backup mode", params: &BackupTaskParams{ - ServiceID: "service-id", - LocationID: "location-id", - Name: "name", - DataModel: models.PhysicalDataModel, - Mode: models.Incremental, + ServiceID: "service-id", + LocationID: "location-id", + Name: "name", + DataModel: models.PhysicalDataModel, + Mode: models.Incremental, + Compression: models.Default, }, errMsg: "unsupported backup mode for mySQL: incremental", }, { name: "unsupported PITR backup mode", params: &BackupTaskParams{ - ServiceID: "service-id", - LocationID: "location-id", - Name: "name", - DataModel: models.PhysicalDataModel, - Mode: models.PITR, + ServiceID: "service-id", + LocationID: "location-id", + Name: "name", + DataModel: models.PhysicalDataModel, + Mode: models.PITR, + Compression: models.Default, }, errMsg: "unsupported backup mode for mySQL: pitr", }, @@ -184,132 +195,144 @@ func TestValidation(t *testing.T) { { name: "normal snapshot", params: &BackupTaskParams{ - ServiceID: "service-id", - LocationID: "location-id", - Name: "name", - DataModel: models.LogicalDataModel, - Mode: models.Snapshot, + ServiceID: "service-id", + LocationID: "location-id", + Name: "name", + DataModel: models.LogicalDataModel, + Mode: models.Snapshot, + Compression: models.Default, }, errMsg: "", }, { name: "normal PITR", params: &BackupTaskParams{ - ServiceID: "service-id", - LocationID: "location-id", - Name: "name", - DataModel: models.LogicalDataModel, - Mode: models.PITR, + ServiceID: "service-id", + LocationID: "location-id", + Name: "name", + DataModel: models.LogicalDataModel, + Mode: models.PITR, + Compression: models.Default, }, errMsg: "", }, { name: "empty name", params: &BackupTaskParams{ - ServiceID: "service-id", - LocationID: "location-id", - Name: "", - DataModel: models.PhysicalDataModel, - Mode: models.Snapshot, + ServiceID: "service-id", + LocationID: "location-id", + Name: "", + DataModel: models.PhysicalDataModel, + Mode: models.Snapshot, + Compression: models.Default, }, errMsg: "backup name can't be empty", }, { name: "empty serviceID", params: &BackupTaskParams{ - ServiceID: "", - LocationID: "location-id", - Name: "name", - DataModel: models.PhysicalDataModel, - Mode: models.Snapshot, + ServiceID: "", + LocationID: "location-id", + Name: "name", + DataModel: models.PhysicalDataModel, + Mode: 
+				Compression: models.Default,
 			},
 			errMsg: "service id can't be empty",
 		},
 		{
 			name: "empty locationId",
 			params: &BackupTaskParams{
-				ServiceID:  "service-id",
-				LocationID: "",
-				Name:       "name",
-				DataModel:  models.PhysicalDataModel,
-				Mode:       models.Snapshot,
+				ServiceID:   "service-id",
+				LocationID:  "",
+				Name:        "name",
+				DataModel:   models.PhysicalDataModel,
+				Mode:        models.Snapshot,
+				Compression: models.Default,
 			},
 			errMsg: "location id can't be empty",
 		},
 		{
 			name: "empty data model",
 			params: &BackupTaskParams{
-				ServiceID:  "service-id",
-				LocationID: "location-id",
-				Name:       "name",
-				DataModel:  "",
-				Mode:       models.Snapshot,
+				ServiceID:   "service-id",
+				LocationID:  "location-id",
+				Name:        "name",
+				DataModel:   "",
+				Mode:        models.Snapshot,
+				Compression: models.Default,
 			},
 			errMsg: "invalid argument: empty data model",
 		},
 		{
 			name: "empty mode",
 			params: &BackupTaskParams{
-				ServiceID:  "service-id",
-				LocationID: "location-id",
-				Name:       "name",
-				DataModel:  models.PhysicalDataModel,
-				Mode:       "",
+				ServiceID:   "service-id",
+				LocationID:  "location-id",
+				Name:        "name",
+				DataModel:   models.PhysicalDataModel,
+				Mode:        "",
+				Compression: models.Default,
 			},
 			errMsg: "invalid argument: empty backup mode",
 		},
 		{
 			name: "invalid data model",
 			params: &BackupTaskParams{
-				ServiceID:  "service-id",
-				LocationID: "location-id",
-				Name:       "name",
-				DataModel:  "invalid",
-				Mode:       models.Snapshot,
+				ServiceID:   "service-id",
+				LocationID:  "location-id",
+				Name:        "name",
+				DataModel:   "invalid",
+				Mode:        models.Snapshot,
+				Compression: models.Default,
 			},
 			errMsg: "invalid argument: invalid data model 'invalid'",
 		},
 		{
 			name: "invalid backup mode",
 			params: &BackupTaskParams{
-				ServiceID:  "service-id",
-				LocationID: "location-id",
-				Name:       "name",
-				DataModel:  models.LogicalDataModel,
-				Mode:       "invalid",
+				ServiceID:   "service-id",
+				LocationID:  "location-id",
+				Name:        "name",
+				DataModel:   models.LogicalDataModel,
+				Mode:        "invalid",
+				Compression: models.Default,
 			},
 			errMsg: "invalid argument: invalid backup mode 'invalid'",
 		},
 		{
 			name: "unsupported incremental backup mode",
 			params: &BackupTaskParams{
-				ServiceID:  "service-id",
-				LocationID: "location-id",
-				Name:       "name",
-				DataModel:  models.LogicalDataModel,
-				Mode:       models.Incremental,
+				ServiceID:   "service-id",
+				LocationID:  "location-id",
+				Name:        "name",
+				DataModel:   models.LogicalDataModel,
+				Mode:        models.Incremental,
+				Compression: models.Default,
 			},
 			errMsg: "unsupported backup mode for mongoDB: incremental",
 		},
 		{
 			name: "no error on physical snapshot backups",
 			params: &BackupTaskParams{
-				ServiceID:  "service-id",
-				LocationID: "location-id",
-				Name:       "name",
-				DataModel:  models.PhysicalDataModel,
-				Mode:       models.Snapshot,
+				ServiceID:   "service-id",
+				LocationID:  "location-id",
+				Name:        "name",
+				DataModel:   models.PhysicalDataModel,
+				Mode:        models.Snapshot,
+				Compression: models.Default,
 			},
 			errMsg: "",
 		},
 		{
 			name: "unsupported PITR backup mode",
 			params: &BackupTaskParams{
-				ServiceID:  "service-id",
-				LocationID: "location-id",
-				Name:       "name",
-				DataModel:  models.PhysicalDataModel,
-				Mode:       models.PITR,
+				ServiceID:   "service-id",
+				LocationID:  "location-id",
+				Name:        "name",
+				DataModel:   models.PhysicalDataModel,
+				Mode:        models.PITR,
+				Compression: models.Default,
 			},
 			errMsg: "PITR is only supported for logical backups: the specified backup model is not compatible with other parameters",
 		},
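Note: every TestValidation case above sets `Compression: models.Default` and never trips the new `p.Compression.Validate()` call, which only confirms that `Default` passes. One plausible `Validate`, assuming the full set of known constants is accepted while empty or unknown values are rejected; the accepted set and the error text are guesses:

```go
package models

import "github.com/pkg/errors"

// Validate checks that the compression is one of the known constants.
// The accepted set and the error wording are assumptions.
func (c BackupCompression) Validate() error {
	switch c {
	case Default, QuickLZ, ZSTD, LZ4, S2, GZIP, Snappy, PGZIP, None:
		return nil
	default:
		return errors.Errorf("invalid compression '%s'", c)
	}
}
```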
diff --git a/managed/services/versioncache/versioncache_test.go b/managed/services/versioncache/versioncache_test.go
index 32617645ed1..8bc19d2597e 100644
--- a/managed/services/versioncache/versioncache_test.go
+++ b/managed/services/versioncache/versioncache_test.go
@@ -92,7 +92,6 @@ func TestVersionCache(t *testing.T) {
 		{Version: "8.0.23"},
 		{Version: "8.0.23"},
 		{Version: "8.0.23"},
-		{Version: "1.1"},
 	}
 
 	versionerMock.On("GetVersions", agentID1, softwares).Return(versions1, nil).Once()
@@ -118,10 +117,6 @@ func TestVersionCache(t *testing.T) {
 			Name:    models.XbcloudSoftwareName,
 			Version: oldVersions[2].Version,
 		},
-		{
-			Name:    models.QpressSoftwareName,
-			Version: oldVersions[3].Version,
-		},
 	}
 	require.Equal(t, softwareVersions, v.SoftwareVersions)
@@ -135,7 +130,6 @@ func TestVersionCache(t *testing.T) {
 		{Version: "8.0.24"},
 		{Version: "5.0.25"},
 		{Version: "5.0.25"},
-		{Version: "0.1"},
 	}
 	mockGetVersions(versions1, versions2, false)
 	mockGetVersions(versions2, versions2, true)
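Note: for completeness, TestConvertCompressionToBackupCompression earlier in this diff fixes the inverse, proto-to-models mapping, including that UNSPECIFIED yields an empty value plus an error. A sketch matching that table, using the same assumed imports as the convertBackupCompression sketch above; the DEFAULT arm is untested and assumed:

```go
// convertCompressionToBackupCompression maps the proto enum back onto the
// models-level value; UNSPECIFIED (and anything unknown) is rejected.
func convertCompressionToBackupCompression(c backupv1.BackupCompression) (models.BackupCompression, error) {
	switch c {
	case backupv1.BackupCompression_BACKUP_COMPRESSION_DEFAULT: // untested; mapping assumed
		return models.Default, nil
	case backupv1.BackupCompression_BACKUP_COMPRESSION_QUICKLZ:
		return models.QuickLZ, nil
	case backupv1.BackupCompression_BACKUP_COMPRESSION_ZSTD:
		return models.ZSTD, nil
	case backupv1.BackupCompression_BACKUP_COMPRESSION_LZ4:
		return models.LZ4, nil
	case backupv1.BackupCompression_BACKUP_COMPRESSION_S2:
		return models.S2, nil
	case backupv1.BackupCompression_BACKUP_COMPRESSION_GZIP:
		return models.GZIP, nil
	case backupv1.BackupCompression_BACKUP_COMPRESSION_SNAPPY:
		return models.Snappy, nil
	case backupv1.BackupCompression_BACKUP_COMPRESSION_PGZIP:
		return models.PGZIP, nil
	case backupv1.BackupCompression_BACKUP_COMPRESSION_NONE:
		return models.None, nil
	default:
		return models.BackupCompression(""), errors.Errorf("invalid backup compression: '%s'", c)
	}
}
```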