diff --git a/go.mod b/go.mod
index d10e015289..5fdfbe78cf 100644
--- a/go.mod
+++ b/go.mod
@@ -18,7 +18,7 @@ require (
github.com/IBM/event-notifications-go-admin-sdk v0.18.0
github.com/IBM/eventstreams-go-sdk v1.4.0
github.com/IBM/go-sdk-core/v5 v5.21.0
- github.com/IBM/ibm-backup-recovery-sdk-go v1.0.3
+ github.com/IBM/ibm-backup-recovery-sdk-go v1.0.5
github.com/IBM/ibm-cos-sdk-go v1.12.2
github.com/IBM/ibm-cos-sdk-go-config/v2 v2.3.0
github.com/IBM/ibm-hpcs-tke-sdk v0.0.0-20250305134146-e023c2e84762
@@ -59,7 +59,7 @@ require (
github.com/pkg/errors v0.9.1
github.com/rook/rook/pkg/apis v0.0.0-20250619203122-80563e28b685
github.com/softlayer/softlayer-go v1.0.3
- github.com/stretchr/testify v1.10.0
+ github.com/stretchr/testify v1.11.1
golang.org/x/crypto v0.41.0
gopkg.in/yaml.v3 v3.0.1
gotest.tools v2.2.0+incompatible
diff --git a/go.sum b/go.sum
index 83d14d411d..fd94e54614 100644
--- a/go.sum
+++ b/go.sum
@@ -127,8 +127,8 @@ github.com/IBM/go-sdk-core/v5 v5.10.2/go.mod h1:WZPFasUzsKab/2mzt29xPcfruSk5js2y
github.com/IBM/go-sdk-core/v5 v5.17.4/go.mod h1:KsAAI7eStAWwQa4F96MLy+whYSh39JzNjklZRbN/8ns=
github.com/IBM/go-sdk-core/v5 v5.21.0 h1:DUnYhvC4SoC8T84rx5omnhY3+xcQg/Whyoa3mDPIMkk=
github.com/IBM/go-sdk-core/v5 v5.21.0/go.mod h1:Q3BYO6iDA2zweQPDGbNTtqft5tDcEpm6RTuqMlPcvbw=
-github.com/IBM/ibm-backup-recovery-sdk-go v1.0.3 h1:9TZHocmCfgmF8TGVrpP1kFyQbjcqLNW7+bM07lefpKQ=
-github.com/IBM/ibm-backup-recovery-sdk-go v1.0.3/go.mod h1:jsYutWlnGysdCNoAk0zoIXb2vfKM9TmMpiUkI4pVuXY=
+github.com/IBM/ibm-backup-recovery-sdk-go v1.0.5 h1:bVf67pkGi5dp8SQXMcam8c1JrEwNRkXVcz3FkaalmYg=
+github.com/IBM/ibm-backup-recovery-sdk-go v1.0.5/go.mod h1:m+7lHOrXJwYPbrdxMmKl96tT7OJMMhpyKnQgnaCcy+w=
github.com/IBM/ibm-cos-sdk-go v1.12.2 h1:71A4tDl8u6BZ548h71ecEe7fw5bBA7ECTVqYmeSQWQA=
github.com/IBM/ibm-cos-sdk-go v1.12.2/go.mod h1:ODYcmrmdpjo5hVguq9RbD6xmC8xb1XZMG7NefUbJNcc=
github.com/IBM/ibm-cos-sdk-go-config/v2 v2.3.0 h1:956Nqk0eKI3lq+AkzWXZDid4UZHRz0wWh1LwkleBsWk=
@@ -1164,8 +1164,8 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
-github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
+github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw=
github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk=
github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0=
diff --git a/ibm/conns/config.go b/ibm/conns/config.go
index e70750b2f0..62a29d74b3 100644
--- a/ibm/conns/config.go
+++ b/ibm/conns/config.go
@@ -1664,7 +1664,7 @@ func (c *Config) ClientSession() (interface{}, error) {
}
// Construct the service options.
- var backupRecoveryURL string
+ var backupRecoveryURL string = "https://default.backup-recovery.cloud.ibm.com/v2"
var backupRecoveryConnectorURL string
if fileMap != nil && c.Visibility != "public-and-private" {
@@ -1676,9 +1676,7 @@ func (c *Config) ClientSession() (interface{}, error) {
Authenticator: authenticator,
URL: EnvFallBack([]string{"IBMCLOUD_BACKUP_RECOVERY_ENDPOINT"}, backupRecoveryURL),
}
- if backupRecoveryClientOptions.URL == "" {
- session.backupRecoveryClientErr = fmt.Errorf("IBMCLOUD_BACKUP_RECOVERY_ENDPOINT not set in env or endpoints file")
- }
+
// Construct the service client.
session.backupRecoveryClient, err = backuprecoveryv1.NewBackupRecoveryV1(backupRecoveryClientOptions)
if err != nil {
diff --git a/ibm/provider/provider.go b/ibm/provider/provider.go
index 401a374957..c7e5de12e4 100644
--- a/ibm/provider/provider.go
+++ b/ibm/provider/provider.go
@@ -266,31 +266,31 @@ func Provider() *schema.Provider {
"ibm_config_aggregator_resource_collection_status": configurationaggregator.AddConfigurationAggregatorInstanceFields(configurationaggregator.DataSourceIbmConfigAggregatorResourceCollectionStatus()),
// // BackupAndRecovery
- "ibm_backup_recovery_agent_upgrade_tasks": backuprecovery.DataSourceIbmBackupRecoveryAgentUpgradeTasks(),
- "ibm_backup_recovery_download_agent": backuprecovery.DataSourceIbmBackupRecoveryDownloadAgent(),
- "ibm_backup_recovery_search_indexed_object": backuprecovery.DataSourceIbmBackupRecoverySearchIndexedObject(),
- "ibm_backup_recovery_object_snapshots": backuprecovery.DataSourceIbmBackupRecoveryObjectSnapshots(),
- "ibm_backup_recovery_connectors_metadata": backuprecovery.DataSourceIbmBackupRecoveryConnectorsMetadata(),
- "ibm_backup_recovery_connector_logs": backuprecovery.DataSourceIbmBackupRecoveryConnectorLogs(),
- "ibm_backup_recovery_connector_status": backuprecovery.DataSourceIbmBackupRecoveryConnectorStatus(),
- "ibm_backup_recovery_data_source_connections": backuprecovery.DataSourceIbmBackupRecoveryDataSourceConnections(),
- "ibm_backup_recovery_data_source_connectors": backuprecovery.DataSourceIbmBackupRecoveryDataSourceConnectors(),
- "ibm_backup_recovery_search_objects": backuprecovery.DataSourceIbmBackupRecoverySearchObjects(),
- "ibm_backup_recovery_search_protected_objects": backuprecovery.DataSourceIbmBackupRecoverySearchProtectedObjects(),
- "ibm_backup_recovery_protection_group": backuprecovery.DataSourceIbmBackupRecoveryProtectionGroup(),
- "ibm_backup_recovery_protection_groups": backuprecovery.DataSourceIbmBackupRecoveryProtectionGroups(),
- "ibm_backup_recovery_protection_group_runs": backuprecovery.DataSourceIbmBackupRecoveryProtectionGroupRuns(),
- "ibm_backup_recovery_protection_policies": backuprecovery.DataSourceIbmBackupRecoveryProtectionPolicies(),
- "ibm_backup_recovery_protection_policy": backuprecovery.DataSourceIbmBackupRecoveryProtectionPolicy(),
- "ibm_backup_recovery": backuprecovery.DataSourceIbmBackupRecovery(),
- "ibm_backup_recoveries": backuprecovery.DataSourceIbmBackupRecoveries(),
- "ibm_backup_recovery_download_files": backuprecovery.DataSourceIbmBackupRecoveryDownloadFiles(),
- "ibm_backup_recovery_source_registrations": backuprecovery.DataSourceIbmBackupRecoverySourceRegistrations(),
- "ibm_backup_recovery_source_registration": backuprecovery.DataSourceIbmBackupRecoverySourceRegistration(),
- "ibm_backup_recovery_download_indexed_files": backuprecovery.DataSourceIbmBackupRecoveryDownloadIndexedFiles(),
- "ibm_backup_recovery_protection_sources": backuprecovery.DataSourceIbmBackupRecoveryProtectionSources(),
- "ibm_backup_recovery_connector_get_users": backuprecovery.DataSourceIbmBackupRecoveryConnectorGetUsers(),
-
+ // // BackupAndRecovery
+ "ibm_backup_recovery_agent_upgrade_tasks": backuprecovery.AddInstanceFields(backuprecovery.DataSourceIbmBackupRecoveryAgentUpgradeTasks()),
+ "ibm_backup_recovery_download_agent": backuprecovery.AddInstanceFields(backuprecovery.DataSourceIbmBackupRecoveryDownloadAgent()),
+ "ibm_backup_recovery_search_indexed_object": backuprecovery.AddInstanceFields(backuprecovery.DataSourceIbmBackupRecoverySearchIndexedObject()),
+ "ibm_backup_recovery_object_snapshots": backuprecovery.AddInstanceFields(backuprecovery.DataSourceIbmBackupRecoveryObjectSnapshots()),
+ "ibm_backup_recovery_connectors_metadata": backuprecovery.AddInstanceFields(backuprecovery.DataSourceIbmBackupRecoveryConnectorsMetadata()),
+ "ibm_backup_recovery_connector_logs": backuprecovery.AddInstanceFields(backuprecovery.DataSourceIbmBackupRecoveryConnectorLogs()),
+ "ibm_backup_recovery_connector_status": backuprecovery.AddInstanceFields(backuprecovery.DataSourceIbmBackupRecoveryConnectorStatus()),
+ "ibm_backup_recovery_data_source_connections": backuprecovery.AddInstanceFields(backuprecovery.DataSourceIbmBackupRecoveryDataSourceConnections()),
+ "ibm_backup_recovery_data_source_connectors": backuprecovery.AddInstanceFields(backuprecovery.DataSourceIbmBackupRecoveryDataSourceConnectors()),
+ "ibm_backup_recovery_search_objects": backuprecovery.AddInstanceFields(backuprecovery.DataSourceIbmBackupRecoverySearchObjects()),
+ "ibm_backup_recovery_search_protected_objects": backuprecovery.AddInstanceFields(backuprecovery.DataSourceIbmBackupRecoverySearchProtectedObjects()),
+ "ibm_backup_recovery_protection_group": backuprecovery.AddInstanceFields(backuprecovery.DataSourceIbmBackupRecoveryProtectionGroup()),
+ "ibm_backup_recovery_protection_groups": backuprecovery.AddInstanceFields(backuprecovery.DataSourceIbmBackupRecoveryProtectionGroups()),
+ "ibm_backup_recovery_protection_group_runs": backuprecovery.AddInstanceFields(backuprecovery.DataSourceIbmBackupRecoveryProtectionGroupRuns()),
+ "ibm_backup_recovery_protection_policies": backuprecovery.AddInstanceFields(backuprecovery.DataSourceIbmBackupRecoveryProtectionPolicies()),
+ "ibm_backup_recovery_protection_policy": backuprecovery.AddInstanceFields(backuprecovery.DataSourceIbmBackupRecoveryProtectionPolicy()),
+ "ibm_backup_recovery": backuprecovery.AddInstanceFields(backuprecovery.DataSourceIbmBackupRecovery()),
+ "ibm_backup_recoveries": backuprecovery.AddInstanceFields(backuprecovery.DataSourceIbmBackupRecoveries()),
+ "ibm_backup_recovery_download_files": backuprecovery.AddInstanceFields(backuprecovery.DataSourceIbmBackupRecoveryDownloadFiles()),
+ "ibm_backup_recovery_source_registrations": backuprecovery.AddInstanceFields(backuprecovery.DataSourceIbmBackupRecoverySourceRegistrations()),
+ "ibm_backup_recovery_source_registration": backuprecovery.AddInstanceFields(backuprecovery.DataSourceIbmBackupRecoverySourceRegistration()),
+ "ibm_backup_recovery_download_indexed_files": backuprecovery.AddInstanceFields(backuprecovery.DataSourceIbmBackupRecoveryDownloadIndexedFiles()),
+ "ibm_backup_recovery_protection_sources": backuprecovery.AddInstanceFields(backuprecovery.DataSourceIbmBackupRecoveryProtectionSources()),
+ "ibm_backup_recovery_connector_get_users": backuprecovery.AddInstanceFields(backuprecovery.DataSourceIbmBackupRecoveryConnectorGetUsers()),
// // AppID
"ibm_appid_action_url": appid.DataSourceIBMAppIDActionURL(),
"ibm_appid_apm": appid.DataSourceIBMAppIDAPM(),
@@ -1121,22 +1121,22 @@ func Provider() *schema.Provider {
},
ResourcesMap: map[string]*schema.Resource{
- "ibm_backup_recovery_agent_upgrade_task": backuprecovery.ResourceIbmBackupRecoveryAgentUpgradeTask(),
- "ibm_backup_recovery_protection_group_run_request": backuprecovery.ResourceIbmBackupRecoveryProtectionGroupRunRequest(),
- "ibm_backup_recovery_data_source_connection": backuprecovery.ResourceIbmBackupRecoveryDataSourceConnection(),
- "ibm_backup_recovery_data_source_connector_patch": backuprecovery.ResourceIbmBackupRecoveryDataSourceConnectorPatch(),
- "ibm_backup_recovery_download_files_folders": backuprecovery.ResourceIbmBackupRecoveryDownloadFilesFolders(),
- "ibm_backup_recovery_restore_points": backuprecovery.ResourceIbmBackupRecoveryRestorePoints(),
- "ibm_backup_recovery_perform_action_on_protection_group_run_request": backuprecovery.ResourceIbmBackupRecoveryPerformActionOnProtectionGroupRunRequest(),
- "ibm_backup_recovery_protection_group": backuprecovery.ResourceIbmBackupRecoveryProtectionGroup(),
- "ibm_backup_recovery_protection_policy": backuprecovery.ResourceIbmBackupRecoveryProtectionPolicy(),
- "ibm_backup_recovery": backuprecovery.ResourceIbmBackupRecovery(),
- "ibm_backup_recovery_source_registration": backuprecovery.ResourceIbmBackupRecoverySourceRegistration(),
- "ibm_backup_recovery_update_protection_group_run_request": backuprecovery.ResourceIbmBackupRecoveryUpdateProtectionGroupRunRequest(),
- "ibm_backup_recovery_connection_registration_token": backuprecovery.ResourceIbmBackupRecoveryConnectionRegistrationToken(),
- "ibm_backup_recovery_connector_registration": backuprecovery.ResourceIbmBackupRecoveryConnectorRegistration(),
- "ibm_backup_recovery_connector_access_token": backuprecovery.ResourceIbmBackupRecoveryConnectorAccessToken(),
- "ibm_backup_recovery_connector_update_user": backuprecovery.ResourceIbmBackupRecoveryConnectorUpdateUser(),
+ "ibm_backup_recovery_agent_upgrade_task": backuprecovery.AddInstanceFields(backuprecovery.ResourceIbmBackupRecoveryAgentUpgradeTask()),
+ "ibm_backup_recovery_protection_group_run_request": backuprecovery.AddInstanceFields(backuprecovery.ResourceIbmBackupRecoveryProtectionGroupRunRequest()),
+ "ibm_backup_recovery_data_source_connection": backuprecovery.AddInstanceFields(backuprecovery.ResourceIbmBackupRecoveryDataSourceConnection()),
+ "ibm_backup_recovery_data_source_connector_patch": backuprecovery.AddInstanceFields(backuprecovery.ResourceIbmBackupRecoveryDataSourceConnectorPatch()),
+ "ibm_backup_recovery_download_files_folders": backuprecovery.AddInstanceFields(backuprecovery.ResourceIbmBackupRecoveryDownloadFilesFolders()),
+ "ibm_backup_recovery_restore_points": backuprecovery.AddInstanceFields(backuprecovery.ResourceIbmBackupRecoveryRestorePoints()),
+ "ibm_backup_recovery_perform_action_on_protection_group_run_request": backuprecovery.AddInstanceFields(backuprecovery.ResourceIbmBackupRecoveryPerformActionOnProtectionGroupRunRequest()),
+ "ibm_backup_recovery_protection_group": backuprecovery.AddInstanceFields(backuprecovery.ResourceIbmBackupRecoveryProtectionGroup()),
+ "ibm_backup_recovery_protection_policy": backuprecovery.AddInstanceFields(backuprecovery.ResourceIbmBackupRecoveryProtectionPolicy()),
+ "ibm_backup_recovery": backuprecovery.AddInstanceFields(backuprecovery.ResourceIbmBackupRecovery()),
+ "ibm_backup_recovery_source_registration": backuprecovery.AddInstanceFields(backuprecovery.ResourceIbmBackupRecoverySourceRegistration()),
+ "ibm_backup_recovery_update_protection_group_run_request": backuprecovery.AddInstanceFields(backuprecovery.ResourceIbmBackupRecoveryUpdateProtectionGroupRunRequest()),
+ "ibm_backup_recovery_connection_registration_token": backuprecovery.AddInstanceFields(backuprecovery.ResourceIbmBackupRecoveryConnectionRegistrationToken()),
+ "ibm_backup_recovery_connector_registration": backuprecovery.AddInstanceFields(backuprecovery.ResourceIbmBackupRecoveryConnectorRegistration()),
+ "ibm_backup_recovery_connector_access_token": backuprecovery.AddInstanceFields(backuprecovery.ResourceIbmBackupRecoveryConnectorAccessToken()),
+ "ibm_backup_recovery_connector_update_user": backuprecovery.AddInstanceFields(backuprecovery.ResourceIbmBackupRecoveryConnectorUpdateUser()),
"ibm_app": cloudfoundry.ResourceIBMApp(),
"ibm_app_domain_private": cloudfoundry.ResourceIBMAppDomainPrivate(),
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recoveries.go b/ibm/service/backuprecovery/data_source_ibm_backup_recoveries.go
index 22fd802ea6..2cdaa51ea1 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recoveries.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recoveries.go
@@ -13,13 +13,12 @@ import (
"log"
"time"
- "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
- "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
-
"github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns"
"github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex"
"github.com/IBM/go-sdk-core/v5/core"
"github.com/IBM/ibm-backup-recovery-sdk-go/backuprecoveryv1"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/diag"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
func DataSourceIbmBackupRecoveries() *schema.Resource {
@@ -2282,6 +2281,18 @@ func dataSourceIbmBackupRecoveriesRead(context context.Context, d *schema.Resour
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
+
getRecoveriesOptions := &backuprecoveryv1.GetRecoveriesOptions{}
getRecoveriesOptions.SetXIBMTenantID(d.Get("x_ibm_tenant_id").(string))
@@ -2351,7 +2362,6 @@ func dataSourceIbmBackupRecoveriesRead(context context.Context, d *schema.Resour
}
d.SetId(dataSourceIbmBackupRecoveriesID(d))
-
if !core.IsNil(recoveriesResponse.Recoveries) {
recoveries := []map[string]interface{}{}
for _, recoveriesItem := range recoveriesResponse.Recoveries {
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recoveries_test.go b/ibm/service/backuprecovery/data_source_ibm_backup_recoveries_test.go
index 2de1b37fb3..59f7d27158 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recoveries_test.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recoveries_test.go
@@ -20,9 +20,9 @@ import (
func TestAccIbmBackupRecoveriesDataSourceBasic(t *testing.T) {
name := fmt.Sprintf("tf_recovery_name_%d", acctest.RandIntRange(10, 100))
snapshotEnvironment := "kPhysical"
- objectId := 3
+ objectId := 344
targetenvironment := "kPhysical"
- absolutePath := "/data/"
+ absolutePath := "/mnt"
restoreEntityType := "kRegular"
recoveryAction := "RecoverFiles"
@@ -51,12 +51,14 @@ func testAccCheckIbmBackupRecoveriesDataSourceConfigBasic(objectId int, name, sn
data "ibm_backup_recovery_object_snapshots" "object_snapshot" {
x_ibm_tenant_id = "%s"
+
object_id = %d
}
resource "ibm_backup_recovery" "baas_recovery_instance" {
x_ibm_tenant_id = "%s"
snapshot_environment = "%s"
+
name = "%s"
physical_params {
recovery_action = "%s"
@@ -81,6 +83,7 @@ func testAccCheckIbmBackupRecoveriesDataSourceConfigBasic(objectId int, name, sn
data "ibm_backup_recoveries" "baas_recoveries_instance" {
x_ibm_tenant_id = "%[1]s"
+
ids = [ ibm_backup_recovery.baas_recovery_instance.recovery_id ]
}
`, tenantId, objectId, tenantId, snapshotEnvironment, name, recoveryAction, targetenvironment, absolutePath, objectId, restoreEntityType, absolutePath)
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery.go
index 16531d36bf..904cf46ca1 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery.go
@@ -2208,6 +2208,17 @@ func dataSourceIbmBackupRecoveryRead(context context.Context, d *schema.Resource
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
getRecoveryByIdOptions := &backuprecoveryv1.GetRecoveryByIdOptions{}
tenantId := d.Get("x_ibm_tenant_id").(string)
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_agent_upgrade_tasks.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_agent_upgrade_tasks.go
index ea75c6b8da..4117fd3831 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_agent_upgrade_tasks.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_agent_upgrade_tasks.go
@@ -224,6 +224,17 @@ func dataSourceIbmBackupRecoveryAgentUpgradeTasksRead(context context.Context, d
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
getUpgradeTasksOptions := &backuprecoveryv1.GetUpgradeTasksOptions{}
@@ -245,7 +256,6 @@ func dataSourceIbmBackupRecoveryAgentUpgradeTasksRead(context context.Context, d
}
d.SetId(dataSourceIbmBackupRecoveryAgentUpgradeTasksID(d))
-
if !core.IsNil(agentUpgradeTaskStates.Tasks) {
tasks := []map[string]interface{}{}
for _, tasksItem := range agentUpgradeTaskStates.Tasks {
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_agent_upgrade_tasks_test.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_agent_upgrade_tasks_test.go
index 420b89e3bd..405952c953 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_agent_upgrade_tasks_test.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_agent_upgrade_tasks_test.go
@@ -20,7 +20,7 @@ import (
func TestAccIbmBackupRecoveryAgentUpgradeTasksDataSourceBasic(t *testing.T) {
name := fmt.Sprintf("tf_name_upgarde_task_%d", acctest.RandIntRange(10, 100))
- agentId := 19
+ agentId := 346
resource.Test(t, resource.TestCase{
PreCheck: func() { acc.TestAccPreCheck(t) },
Providers: acc.TestAccProviders,
@@ -48,11 +48,13 @@ func testAccCheckIbmBackupRecoveryAgentUpgradeTasksDataSourceConfigBasic(name st
resource "ibm_backup_recovery_agent_upgrade_task" "baas_agent_upgrade_task_instance" {
x_ibm_tenant_id = "%s"
agent_ids = [%d]
+
name = "%s"
description = "Includes Agents for Sources RHEL, Win Server and MS SQL"
}
data "ibm_backup_recovery_agent_upgrade_tasks" "baas_agent_upgrade_tasks_instance" {
x_ibm_tenant_id = "%[1]s"
+
ids = [ibm_backup_recovery_agent_upgrade_task.baas_agent_upgrade_task_instance.id]
}
`, tenantId, agentId, name)
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_connectors_metadata.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_connectors_metadata.go
index b888504c9b..d018892469 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_connectors_metadata.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_connectors_metadata.go
@@ -71,6 +71,17 @@ func dataSourceIbmBackupRecoveryConnectorsMetadataRead(context context.Context,
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
getConnectorMetadataOptions := &backuprecoveryv1.GetConnectorMetadataOptions{}
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_connectors_metadata_test.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_connectors_metadata_test.go
index 66cad0f4fd..d1a6156c35 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_connectors_metadata_test.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_connectors_metadata_test.go
@@ -40,6 +40,7 @@ func testAccCheckIbmBackupRecoveryConnectorsMetadataDataSourceConfigBasic() stri
return fmt.Sprintf(`
data "ibm_backup_recovery_connectors_metadata" "baas_connectors_metadata_instance" {
x_ibm_tenant_id = "%s"
+
}
`, tenantId)
}
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_data_source_connections.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_data_source_connections.go
index 7a414b9c05..1bfa926977 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_data_source_connections.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_data_source_connections.go
@@ -100,6 +100,17 @@ func dataSourceIbmBackupRecoveryDataSourceConnectionsRead(context context.Contex
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
getDataSourceConnectionsOptions := &backuprecoveryv1.GetDataSourceConnectionsOptions{}
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_data_source_connections_test.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_data_source_connections_test.go
index f2d8bc2dfd..2ecbaea891 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_data_source_connections_test.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_data_source_connections_test.go
@@ -18,7 +18,7 @@ import (
)
const (
- tenantId string = "jhxqx715r9/"
+ tenantId string = "wkk1yqrdce/"
)
func TestAccIbmBackupRecoveryDataSourceConnectionsDataSourceBasic(t *testing.T) {
@@ -49,11 +49,13 @@ func testAccCheckIbmBackupRecoveryDataSourceConnectionsDataSourceConfigBasic(con
resource "ibm_backup_recovery_data_source_connection" "baas_data_source_connection_instance" {
x_ibm_tenant_id = "%s"
+
connection_name = "%s"
}
data "ibm_backup_recovery_data_source_connections" "baas_data_source_connections_instance" {
x_ibm_tenant_id = ibm_backup_recovery_data_source_connection.baas_data_source_connection_instance.x_ibm_tenant_id
+
connection_ids = [ibm_backup_recovery_data_source_connection.baas_data_source_connection_instance.connection_id]
}
`, tenantId, connectionName)
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_data_source_connectors.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_data_source_connectors.go
index 692901ec6c..93d2a28a09 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_data_source_connectors.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_data_source_connectors.go
@@ -158,6 +158,18 @@ func dataSourceIbmBackupRecoveryDataSourceConnectorsRead(context context.Context
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
+
getDataSourceConnectorsOptions := &backuprecoveryv1.GetDataSourceConnectorsOptions{}
getDataSourceConnectorsOptions.SetXIBMTenantID(d.Get("x_ibm_tenant_id").(string))
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_data_source_connectors_test.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_data_source_connectors_test.go
index 0930cef57e..7d1625225d 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_data_source_connectors_test.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_data_source_connectors_test.go
@@ -17,7 +17,7 @@ import (
)
func TestAccIbmBackupRecoveryDataSourceConnectorsDataSourceBasic(t *testing.T) {
- dataSourceConnectorConnectionId := "6456884682673709176"
+ dataSourceConnectorConnectionId := "5128356219792164864"
resource.Test(t, resource.TestCase{
PreCheck: func() { acc.TestAccPreCheck(t) },
@@ -45,6 +45,7 @@ func testAccCheckIbmBackupRecoveryDataSourceConnectorsDataSourceConfigBasic(data
return fmt.Sprintf(`
data "ibm_backup_recovery_data_source_connectors" "baas_data_source_connectors_instance" {
x_ibm_tenant_id = "%s"
+
connection_id = "%s"
}
`, tenantId, dataSourceConnectorConnectioID)
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_download_agent.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_download_agent.go
index 945fa3c2d9..e6a98ae278 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_download_agent.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_download_agent.go
@@ -70,6 +70,17 @@ func DataSourceIbmBackupRecoveryDownloadAgentRead(context context.Context, d *sc
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
downloadAgentOptions := &backuprecoveryv1.DownloadAgentOptions{}
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_download_agent_test.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_download_agent_test.go
index 03d0da5424..f76d09ea95 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_download_agent_test.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_download_agent_test.go
@@ -64,6 +64,7 @@ func testAccCheckIbmBackupRecoveryDownloadAgentDataSourceConfigBasic(filePath st
return fmt.Sprintf(`
data "ibm_backup_recovery_download_agent" "baas_download_agent_instance" {
x_ibm_tenant_id = "%s"
+
platform = "kWindows"
file_path = "%s"
}
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_download_files.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_download_files.go
index da5d00301a..75eb835fce 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_download_files.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_download_files.go
@@ -77,6 +77,17 @@ func dataSourceIbmBackupRecoveryDownloadFilesRead(context context.Context, d *sc
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
downloadFilesFromRecoveryOptions := &backuprecoveryv1.DownloadFilesFromRecoveryOptions{}
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_download_files_test.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_download_files_test.go
index 4c300cce7a..bcfe8222eb 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_download_files_test.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_download_files_test.go
@@ -19,7 +19,7 @@ import (
func TestAccIbmRecoveryDownloadFilesDataSourceBasic(t *testing.T) {
name := fmt.Sprintf("tf_recovery_download_files_folders_name_%d", acctest.RandIntRange(10, 100))
- objectId := 18
+ objectId := 344
resource.Test(t, resource.TestCase{
PreCheck: func() { acc.TestAccPreCheck(t) },
Providers: acc.TestAccProviders,
@@ -40,21 +40,24 @@ func testAccCheckIbmRecoveryDownloadFilesDataSourceConfigBasic(name string, obje
return fmt.Sprintf(`
data "ibm_backup_recovery_object_snapshots" "baas_object_snapshots_instance" {
x_ibm_tenant_id = "%s"
+
object_id = %d
}
resource "ibm_backup_recovery_download_files_folders" "baas_recovery_download_files_folders_instance" {
x_ibm_tenant_id = "%s"
name = "%s"
+
object {
snapshot_id = data.ibm_backup_recovery_object_snapshots.baas_object_snapshots_instance.snapshots[0].id
}
files_and_folders {
- absolute_path = "/data/"
+ absolute_path = "/mnt"
}
}
data "ibm_backup_recovery_download_files" "recovery_download_files_instance" {
x_ibm_tenant_id = "%[1]s"
+
recovery_download_files_id = ibm_backup_recovery_download_files_folders.baas_recovery_download_files_folders_instance.id
}
`, tenantId, objectId, tenantId, name)
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_download_indexed_files.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_download_indexed_files.go
index 8fe7d49dd7..1c2ac46939 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_download_indexed_files.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_download_indexed_files.go
@@ -72,6 +72,17 @@ func dataSourceIbmBackupRecoveryDownloadIndexedFilesRead(context context.Context
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
downloadIndexedFileOptions := &backuprecoveryv1.DownloadIndexedFileOptions{}
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_download_indexed_files_test.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_download_indexed_files_test.go
index aa9ddf408d..6058febb7a 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_download_indexed_files_test.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_download_indexed_files_test.go
@@ -17,7 +17,7 @@ import (
)
func TestAccIbmBackupRecoveryDownloadIndexedFilesDataSourceBasic(t *testing.T) {
- objectId := 18
+ objectId := 344
resource.Test(t, resource.TestCase{
PreCheck: func() { acc.TestAccPreCheck(t) },
Providers: acc.TestAccProviders,
@@ -38,13 +38,15 @@ func testAccCheckIbmBackupRecoveryDownloadIndexedFilesDataSourceConfigBasic(obje
return fmt.Sprintf(`
data "ibm_backup_recovery_object_snapshots" "baas_object_snapshots_instance" {
x_ibm_tenant_id = "%s"
+
object_id = %d
}
data "ibm_backup_recovery_download_indexed_files" "baas_download_indexed_files_instance" {
x_ibm_tenant_id = "%s"
snapshots_id = data.ibm_backup_recovery_object_snapshots.baas_object_snapshots_instance.snapshots.0.id
- file_path = "/data/"
+
+ file_path = "/mnt"
}
`, tenantId, objectId, tenantId)
}
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_object_snapshots.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_object_snapshots.go
index 10cfd18a99..2a942a3ad4 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_object_snapshots.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_object_snapshots.go
@@ -627,6 +627,17 @@ func dataSourceIbmBackupRecoveryObjectSnapshotsRead(context context.Context, d *
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
getObjectSnapshotsOptions := &backuprecoveryv1.GetObjectSnapshotsOptions{}
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_object_snapshots_test.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_object_snapshots_test.go
index 8a4793e260..095f71041f 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_object_snapshots_test.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_object_snapshots_test.go
@@ -18,7 +18,7 @@ import (
)
func TestAccIbmBackupRecoveryObjectSnapshotsDataSourceBasic(t *testing.T) {
- objectId := 18
+ objectId := 344
resource.Test(t, resource.TestCase{
PreCheck: func() { acc.TestAccPreCheck(t) },
Providers: acc.TestAccProviders,
@@ -60,6 +60,7 @@ func testAccCheckIbmBackupRecoveryObjectSnapshotsDataSourceConfigBasic(objectId
data "ibm_backup_recovery_object_snapshots" "baas_object_snapshots_instance" {
x_ibm_tenant_id = "%s"
object_id = %d
+
}
`, tenantId, objectId)
}
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_group.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_group.go
index dcf8a69a29..d92fcc04b9 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_group.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_group.go
@@ -4641,6 +4641,18 @@ func dataSourceIbmBackupRecoveryProtectionGroupRead(context context.Context, d *
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
tenantId := d.Get("x_ibm_tenant_id").(string)
getProtectionGroupByIdOptions := &backuprecoveryv1.GetProtectionGroupByIdOptions{}
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_group_runs.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_group_runs.go
index 72db9dfbd6..58bba95a62 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_group_runs.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_group_runs.go
@@ -3051,6 +3051,17 @@ func dataSourceIbmBackupRecoveryProtectionGroupRunsRead(context context.Context,
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
getProtectionGroupRunsOptions := &backuprecoveryv1.GetProtectionGroupRunsOptions{}
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_group_runs_test.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_group_runs_test.go
index 476ed75902..3a3f405291 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_group_runs_test.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_group_runs_test.go
@@ -17,7 +17,7 @@ import (
)
func TestAccIbmBackupRecoveryProtectionGroupRunsDataSourceBasic(t *testing.T) {
- groupName := "terra-test-group-1"
+ groupName := "tetst-terra-group-4"
resource.Test(t, resource.TestCase{
PreCheck: func() { acc.TestAccPreCheck(t) },
Providers: acc.TestAccProviders,
@@ -51,11 +51,13 @@ func testAccCheckIbmBackupRecoveryProtectionGroupRunsDataSourceConfigBasic(group
return fmt.Sprintf(`
data "ibm_backup_recovery_protection_groups" "ibm_backup_recovery_protection_groups_instance" {
x_ibm_tenant_id = "%s"
+
names = ["%s"]
}
data "ibm_backup_recovery_protection_group_runs" "baas_protection_group_runs_instance" {
x_ibm_tenant_id = "%[1]s"
+
protection_group_id = data.ibm_backup_recovery_protection_groups.ibm_backup_recovery_protection_groups_instance.protection_groups.0.id
}
`, tenantId, groupName)
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_group_test.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_group_test.go
index 4170910dd9..12d924e80a 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_group_test.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_group_test.go
@@ -21,7 +21,7 @@ import (
func TestAccIbmBackupRecoveryProtectionGroupDataSourceBasic(t *testing.T) {
groupName := fmt.Sprintf("tf_groupname_%d", acctest.RandIntRange(10, 100))
policyName := fmt.Sprintf("tf_policyname_%d", acctest.RandIntRange(10, 100))
- objectId := 18
+ objectId := 344
environment := "kPhysical"
includedPath := "/data2/data/"
protectionType := "kFile"
@@ -52,6 +52,7 @@ func testAccCheckIbmBackupRecoveryProtectionGroupDataSourceConfigBasic(name, env
resource "ibm_backup_recovery_protection_policy" "baas_protection_policy_instance" {
x_ibm_tenant_id = "%s"
name = "%s"
+
backup_policy {
regular {
incremental{
@@ -79,6 +80,7 @@ func testAccCheckIbmBackupRecoveryProtectionGroupDataSourceConfigBasic(name, env
resource "ibm_backup_recovery_protection_group" "baas_protection_group_instance" {
x_ibm_tenant_id = "%s"
+
policy_id = ibm_backup_recovery_protection_policy.baas_protection_policy_instance.policy_id
name = "%s"
environment = "%s"
@@ -97,6 +99,7 @@ func testAccCheckIbmBackupRecoveryProtectionGroupDataSourceConfigBasic(name, env
data "ibm_backup_recovery_protection_group" "baas_protection_group_instance" {
protection_group_id = ibm_backup_recovery_protection_group.baas_protection_group_instance.group_id
+
x_ibm_tenant_id = "%[1]s"
}
`, tenantId, policyName, tenantId, name, environment, protectionType, objectId, includedPath)
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_groups.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_groups.go
index 0c2cf5b205..1a8a5fbc30 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_groups.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_groups.go
@@ -4756,6 +4756,17 @@ func dataSourceIbmBackupRecoveryProtectionGroupsRead(context context.Context, d
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
getProtectionGroupsOptions := &backuprecoveryv1.GetProtectionGroupsOptions{}
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_groups_test.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_groups_test.go
index 72961c3467..8fb9ddf4d3 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_groups_test.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_groups_test.go
@@ -20,7 +20,7 @@ import (
func TestAccIbmBackupRecoveryProtectionGroupsDataSourceBasic(t *testing.T) {
groupName := fmt.Sprintf("tf_groupname_%d", acctest.RandIntRange(10, 100))
policyName := fmt.Sprintf("tf_policyname_%d", acctest.RandIntRange(10, 100))
- objectId := 18
+ objectId := 344
environment := "kPhysical"
includedPath := "/data1/data/dat2/"
protectionType := "kFile"
@@ -53,6 +53,8 @@ func testAccCheckIbmBackupRecoveryProtectionGroupsDataSourceConfigBasic(name, en
resource "ibm_backup_recovery_protection_policy" "baas_protection_policy_instance" {
x_ibm_tenant_id = "%s"
name = "%s"
+
+
backup_policy {
regular {
incremental{
@@ -81,6 +83,7 @@ func testAccCheckIbmBackupRecoveryProtectionGroupsDataSourceConfigBasic(name, en
resource "ibm_backup_recovery_protection_group" "baas_protection_group_instance" {
x_ibm_tenant_id = "%s"
policy_id = ibm_backup_recovery_protection_policy.baas_protection_policy_instance.policy_id
+
name = "%s"
environment = "%s"
physical_params {
@@ -97,6 +100,7 @@ func testAccCheckIbmBackupRecoveryProtectionGroupsDataSourceConfigBasic(name, en
}
data "ibm_backup_recovery_protection_groups" "baas_protection_groups_instance" {
x_ibm_tenant_id = "%[1]s"
+
ids = [ ibm_backup_recovery_protection_group.baas_protection_group_instance.group_id ]
}
`, tenantId, policyName, tenantId, name, environment, protectionType, objectId, includedPath)
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_policies.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_policies.go
index f336747967..969528aa8d 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_policies.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_policies.go
@@ -4059,6 +4059,18 @@ func dataSourceIbmBackupRecoveryProtectionPoliciesRead(context context.Context,
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
+
getProtectionPoliciesOptions := &backuprecoveryv1.GetProtectionPoliciesOptions{}
getProtectionPoliciesOptions.SetXIBMTenantID(d.Get("x_ibm_tenant_id").(string))
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_policies_test.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_policies_test.go
index 3f50e05ed9..05f552e1a9 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_policies_test.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_policies_test.go
@@ -43,6 +43,7 @@ func testAccCheckIbmBackupRecoveryProtectionPoliciesDataSourceConfigBasic(name s
return fmt.Sprintf(`
resource "ibm_backup_recovery_protection_policy" "baas_protection_policy_instance" {
x_ibm_tenant_id = "%s"
+
name = "%s"
backup_policy {
regular {
@@ -71,6 +72,7 @@ func testAccCheckIbmBackupRecoveryProtectionPoliciesDataSourceConfigBasic(name s
data "ibm_backup_recovery_protection_policies" "baas_protection_policies_instance" {
ids = [ibm_backup_recovery_protection_policy.baas_protection_policy_instance.policy_id]
+
x_ibm_tenant_id = "%[1]s"
}
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_policy.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_policy.go
index c0f76f14ea..5be80ae94a 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_policy.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_policy.go
@@ -4015,6 +4015,18 @@ func dataSourceIbmBackupRecoveryProtectionPolicyRead(context context.Context, d
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
+
tenantId := d.Get("x_ibm_tenant_id").(string)
getProtectionPolicyByIdOptions := &backuprecoveryv1.GetProtectionPolicyByIdOptions{}
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_policy_test.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_policy_test.go
index 2a9623e687..593873bebb 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_policy_test.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_policy_test.go
@@ -48,6 +48,7 @@ func testAccCheckIbmBackupRecoveryProtectionPolicyDataSourceConfigBasic(name str
return fmt.Sprintf(`
resource "ibm_backup_recovery_protection_policy" "baas_protection_policy_instance" {
x_ibm_tenant_id = "%s"
+
name = "%s"
backup_policy {
regular {
@@ -76,6 +77,7 @@ func testAccCheckIbmBackupRecoveryProtectionPolicyDataSourceConfigBasic(name str
data "ibm_backup_recovery_protection_policy" "baas_protection_policy_instance" {
protection_policy_id = ibm_backup_recovery_protection_policy.baas_protection_policy_instance.policy_id
+
x_ibm_tenant_id = "%[1]s"
}
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_sources.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_sources.go
index f99adc6115..1745287d59 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_sources.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_sources.go
@@ -6014,7 +6014,10921 @@ func DataSourceIbmBackupRecoveryProtectionSources() *schema.Resource {
Computed: true,
Description: "Specifies children of the current node in the Protection Sources hierarchy. When representing Objects in memory, the entire Object subtree hierarchy is represented. You can use this subtree to navigate down the Object hierarchy.",
Elem: &schema.Resource{
- Schema: map[string]*schema.Schema{},
+ Schema: map[string]*schema.Schema{
+ "application_nodes": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the child subtree used to store additional application-level Objects. Different environments use the subtree to store application-level information. For example for SQL Server, this subtree stores the SQL Server instances running on a VM.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "nodes": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies children of the current node in the Protection Sources hierarchy.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "application_nodes": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the child subtree used to store additional application-level Objects. Different environments use the subtree to store application-level information. For example for SQL Server, this subtree stores the SQL Server instances running on a VM.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{},
+ },
+ },
+ "entity_pagination_parameters": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the cursor based pagination parameters for Protection Source and its children. Pagination is supported at a given level within the Protection Source Hierarchy with the help of before or after cursors. A Cursor will always refer to a specific source within the source dataset but will be invalidated if the item is removed.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "after_cursor_entity_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the entity id starting from which the items are to be returned.",
+ },
+ "before_cursor_entity_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the entity id upto which the items are to be returned.",
+ },
+ "node_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the entity id for the Node at any level within the Source entity hierarchy whose children are to be paginated.",
+ },
+ "page_size": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the maximum number of entities to be returned within the page.",
+ },
+ },
+ },
+ },
+ "entity_permission_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the permission information of entities.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "entity_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the entity id.",
+ },
+ "groups": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies struct with basic group details.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "domain": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies domain name of the user.",
+ },
+ "group_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies group name of the group.",
+ },
+ "sid": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies unique Security ID (SID) of the user.",
+ },
+ "tenant_ids": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the tenants to which the group belongs to.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ },
+ },
+ },
+ "is_inferred": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether the Entity Permission Information is inferred or not. For example, SQL application hosted over vCenter will have inferred entity permission information.",
+ },
+ "is_registered_by_sp": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether this entity is registered by the SP or not. This will be populated only if the entity is a root entity. Refer to magneto/base/permissions.proto for details.",
+ },
+ "registering_tenant_id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the tenant id that registered this entity. This will be populated only if the entity is a root entity.",
+ },
+ "tenant": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies struct with basic tenant details.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "bifrost_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if this tenant is bifrost enabled or not.",
+ },
+ "is_managed_on_helios": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether this tenant is manged on helios.",
+ },
+ "name": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies name of the tenant.",
+ },
+ "tenant_id": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies the unique id of the tenant.",
+ },
+ },
+ },
+ },
+ "users": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies struct with basic user details.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "domain": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies domain name of the user.",
+ },
+ "sid": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies unique Security ID (SID) of the user.",
+ },
+ "tenant_id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the tenant to which the user belongs to.",
+ },
+ "user_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies user name of the user.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "logical_size": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the logical size of the data in bytes for the Object on this node. Presence of this field indicates this node is a leaf node.",
+ },
+ "nodes": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies children of the current node in the Protection Sources hierarchy. When representing Objects in memory, the entire Object subtree hierarchy is represented. You can use this subtree to navigate down the Object hierarchy.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{},
+ },
+ },
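+ // object_protection_info: object-level protection details for this Protection Source (auto-protect parent, entity id, active protection spec).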
+ "object_protection_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Object Protection Info of the Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "auto_protect_parent_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the auto protect parent id if this entity is protected based on auto protection. This is only specified for leaf entities.",
+ },
+ "entity_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the entity id.",
+ },
+ "has_active_object_protection_spec": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies if the entity is under object protection.",
+ },
+ },
+ },
+ },
+ "protected_sources_summary": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Array of Protected Objects. Specifies aggregated information about all the child Objects of this node that are currently protected by a Protection Job. There is one entry for each environment that is being backed up. The aggregated information for the Object hierarchy's environment will be available at the 0th index of the vector.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the environment such as 'kSQL' or 'kVMware', where the Protection Source exists. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc. NOTE 'kPuppeteer' refers to Cohesity's Remote Adapter. 'kVMware' indicates the VMware Protection Source environment. 'kHyperV' indicates the HyperV Protection Source environment. 'kSQL' indicates the SQL Protection Source environment. 'kView' indicates the View Protection Source environment. 'kPuppeteer' indicates the Cohesity's Remote Adapter. 'kPhysical' indicates the physical Protection Source environment. 'kPure' indicates the Pure Storage Protection Source environment. 'kNimble' indicates the Nimble Storage Protection Source environment. 'kAzure' indicates the Microsoft's Azure Protection Source environment. 'kNetapp' indicates the Netapp Protection Source environment. 'kAgent' indicates the Agent Protection Source environment. 'kGenericNas' indicates the Generic Network Attached Storage Protection Source environment. 'kAcropolis' indicates the Acropolis Protection Source environment. 'kPhysicalFiles' indicates the Physical Files Protection Source environment. 'kIbmFlashSystem' indicates the IBM Flash System Protection Source environment. 'kIsilon' indicates the Dell EMC's Isilon Protection Source environment. 'kGPFS' indicates IBM's GPFS Protection Source environment. 'kKVM' indicates the KVM Protection Source environment. 'kAWS' indicates the AWS Protection Source environment. 'kExchange' indicates the Exchange Protection Source environment. 'kHyperVVSS' indicates the HyperV VSS Protection Source environment. 'kOracle' indicates the Oracle Protection Source environment. 'kGCP' indicates the Google Cloud Platform Protection Source environment. 'kFlashBlade' indicates the Flash Blade Protection Source environment. 'kAWSNative' indicates the AWS Native Protection Source environment. 'kO365' indicates the Office 365 Protection Source environment. 'kO365Outlook' indicates Office 365 outlook Protection Source environment. 'kHyperFlex' indicates the Hyper Flex Protection Source environment. 'kGCPNative' indicates the GCP Native Protection Source environment. 'kAzureNative' indicates the Azure Native Protection Source environment. 'kKubernetes' indicates a Kubernetes Protection Source environment. 'kElastifile' indicates Elastifile Protection Source environment. 'kAD' indicates Active Directory Protection Source environment. 'kRDSSnapshotManager' indicates AWS RDS Protection Source environment. 'kCassandra' indicates Cassandra Protection Source environment. 'kMongoDB' indicates MongoDB Protection Source environment. 'kCouchbase' indicates Couchbase Protection Source environment. 'kHdfs' indicates Hdfs Protection Source environment. 'kHive' indicates Hive Protection Source environment. 'kHBase' indicates HBase Protection Source environment. 'kUDA' indicates Universal Data Adapter Protection Source environment. 'kO365Teams' indicates the Office365 Teams Protection Source environment. 'kO365Group' indicates the Office365 Groups Protection Source environment. 'kO365Exchange' indicates the Office365 Mailbox Protection Source environment. 'kO365OneDrive' indicates the Office365 OneDrive Protection Source environment. 'kO365Sharepoint' indicates the Office365 SharePoint Protection Source environment. 'kO365PublicFolders' indicates the Office365 PublicFolders Protection Source environment.",
+ },
+ "leaves_count": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the number of leaf nodes under the subtree of this node.",
+ },
+ "total_logical_size": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the total logical size of the data under the subtree of this node.",
+ },
+ },
+ },
+ },
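+ // protection_source: details of the Protection Source node itself (ids, names, environment and per-environment sub-blocks).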
+ "protection_source": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies details about an Acropolis Protection Source when the environment is set to 'kAcropolis'.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "connection_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the connection id of the tenant.",
+ },
+ "connector_group_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the connector group id of the connector groups.",
+ },
+ "custom_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the user provided custom name of the Protection Source.",
+ },
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the environment (such as 'kVMware' or 'kSQL') where the Protection Source exists. Depending on the environment, one of the following Protection Sources are initialized.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies an id of the Protection Source.",
+ },
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a name of the Protection Source.",
+ },
+ "parent_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies an id of the parent of the Protection Source.",
+ },
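+ // physical_protection_source: attributes specific to a Physical environment source, including agents, volumes and VSS writers.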
+ "physical_protection_source": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies a Protection Source in a Physical environment.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "agents": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the agents running on the Physical Protection Source and their status information.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "cbmr_version": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the version of the Cristie BMR product, if it is installed on the host.",
+ },
+ "file_cbt_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "CBT version and service state info.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "file_version": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Subcomponent version. The interpretation of the version is based on operating system.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "build_ver": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ "major_ver": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ "minor_ver": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ "revision_num": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ },
+ },
+ },
+ "is_installed": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Indicates whether the cbt driver is installed.",
+ },
+ "reboot_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Indicates whether host is rebooted post VolCBT installation.",
+ },
+ "service_state": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Structure to Hold Service Status.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "state": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "host_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the host type where the agent is running. This is only set for persistent agents.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the agent's id.",
+ },
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the agent's name.",
+ },
+ "oracle_multi_node_channel_supported": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether oracle multi node multi channel is supported or not.",
+ },
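+ // registration_info: details captured when the source was registered (access info, credentials, throttling, registered apps).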
+ "registration_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies information about a registered Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "access_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the parameters required to establish a connection with a particular environment.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "connection_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "ID of the Bifrost (HyX or Rigel) network realm (i.e. a connection) associated with the source.",
+ },
+ "connector_group_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Id of the connector group. Each connector group is a collection of Rigel/HyX instances. Each entity will be tagged with a connector group id.",
+ },
+ "endpoint": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies an IP address or URL of the environment (such as the IP address of the vCenter Server for a VMware environment).",
+ },
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the environment like VMware, SQL, where the Protection Source exists. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies a Unique id that is generated when the Source is registered. This is a convenience field that is used to maintain an index to different connection params.",
+ },
+ "version": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Version is updated each time the connector parameters are updated. This is used to discard older connector parameters.",
+ },
+ },
+ },
+ },
+ "allowed_ip_addresses": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of IP Addresses on the registered source to be exclusively allowed for doing any type of IO operations.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "authentication_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies an authentication error message. This indicates the given credentials are rejected and the registration of the source is not successful.",
+ },
+ "authentication_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the status of authenticating to the Protection Source when registering it with the Cohesity Cluster.",
+ },
+ "blacklisted_ip_addresses": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "This field is deprecated. Use DeniedIpAddresses instead.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "denied_ip_addresses": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of IP Addresses on the registered source to be denied for doing any type of IO operations.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "environments": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies a list of applications environment that are registered with this Protection Source such as 'kSQL'. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "is_db_authenticated": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the application entity is dbAuthenticated or not.",
+ },
+ "is_storage_array_snapshot_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if this source entity has enabled storage array snapshot or not.",
+ },
+ "link_vms_across_vcenter": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the VM linking feature is enabled for this vCenter. This means that VMs present in this vCenter which earlier belonged to some other vCenter (also registered on the same cluster) and were migrated will be linked during EH refresh. This enables preserving snapshot chains for migrated VMs.",
+ },
+ "minimum_free_space_gb": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minimum free space in GiB expected to be available on the datastore where the virtual disks of the VM being backed up reside. If the amount of free space (in GiB) is lower than the value given by this field, the backup will be aborted. Note that this field is applicable only to 'kVMware' type of environments.",
+ },
+ "minimum_free_space_percent": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minimum free space in percentage expected to be available on the datastore where the virtual disks of the VM being backed up reside. If the amount of free space (in percentage) is lower than the value given by this field, the backup will be aborted. Note that this field is applicable only to 'kVMware' type of environments.",
+ },
+ "password": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies password of the username to access the target source.",
+ },
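+ // physical_params: application registration parameters for the physical adapter, including source-side CPU and network throttling windows.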
+ "physical_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the parameters required to register Application Servers running in a Protection Source specific to a physical adapter.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "applications": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the types of applications such as 'kSQL', 'kExchange', 'kAD' running on the Protection Source. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "password": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies password of the username to access the target source.",
+ },
+ "throttling_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the source side throttling configuration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "cpu_throttling_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Configuration Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "fixed_threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Fixed baseline threshold for throttling. This is mandatory for any other throttling type than kNoThrottling.",
+ },
+ "pattern_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Type of the throttling pattern. 'kNoThrottling' indicates that throttling is not in force. 'kBaseThrottling' indicates a constant base level throttling. 'kFixed' indicates a constant base level throttling.",
+ },
+ "throttling_windows": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Window Parameters Definition.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day_time_window": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Window Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "end_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "start_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Throttling threshold applicable in the window.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "network_throttling_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Configuration Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "fixed_threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Fixed baseline threshold for throttling. This is mandatory for any other throttling type than kNoThrottling.",
+ },
+ "pattern_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Type of the throttling pattern. 'kNoThrottling' indicates that throttling is not in force. 'kBaseThrottling' indicates a constant base level throttling. 'kFixed' indicates a constant base level throttling.",
+ },
+ "throttling_windows": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Window Parameters Definition.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day_time_window": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Window Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "end_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "start_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Throttling threshold applicable in the window.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "username": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies username to access the target source.",
+ },
+ },
+ },
+ },
+ "progress_monitor_path": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Captures the current progress and pulse details w.r.t. either the registration or refresh.",
+ },
+ "refresh_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a message if there was any error encountered during the last rebuild of the Protection Source tree. If there was no error during the last rebuild, this field is reset.",
+ },
+ "refresh_time_usecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Unix epoch time (in microseconds) when the Protection Source tree was most recently fetched and built.",
+ },
+ "registered_apps_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies information of the applications registered on this protection source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "authentication_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies an authentication error message. This indicates the given credentials are rejected and the registration of the application is not successful.",
+ },
+ "authentication_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the status of authenticating to the Protection Source when registering this application with Cohesity Cluster. If the status is 'kFinished' and there is no error, registration is successful. Specifies the status of the authentication during the registration of a Protection Source. 'kPending' indicates the authentication is in progress. 'kScheduled' indicates the authentication is scheduled. 'kFinished' indicates the authentication is completed. 'kRefreshInProgress' indicates the refresh is in progress.",
+ },
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the application environment. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ },
+ "host_settings_check_results": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of check results internally performed to verify status of various services such as 'AgentRunning', 'SQLWriterRunning' etc.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "check_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of the check internally performed. Specifies the type of the host check performed internally. 'kIsAgentPortAccessible' indicates the check for agent port access. 'kIsAgentRunning' indicates the status for the Cohesity agent service. 'kIsSQLWriterRunning' indicates the status for SQLWriter service. 'kAreSQLInstancesRunning' indicates the run status for all the SQL instances in the host. 'kCheckServiceLoginsConfig' checks the privileges and sysadmin status of the logins used by the SQL instance services, Cohesity agent service and the SQLWriter service. 'kCheckSQLFCIVIP' checks whether the SQL FCI is registered with a valid VIP or FQDN. 'kCheckSQLDiskSpace' checks whether volumes containing SQL DBs have at least 10% free space.",
+ },
+ "result_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of the result returned after performing the internal host check. Specifies the type of the host check result performed internally. 'kPass' indicates that the respective check was successful. 'kFail' indicates that the respective check failed as some mandatory setting is not met 'kWarning' indicates that the respective check has warning as certain non-mandatory setting is not met.",
+ },
+ "user_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a descriptive message for failed/warning types.",
+ },
+ },
+ },
+ },
+ "refresh_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a message if there was any error encountered during the last rebuild of the application tree. If there was no error during the last rebuild, this field is reset.",
+ },
+ },
+ },
+ },
+ "registration_time_usecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Unix epoch time (in microseconds) when the Protection Source was registered.",
+ },
+ "subnets": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of subnets added during creation or update of the VMware source. Currently, this field will only be populated in case of VMware registration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "component": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Component that has reserved the subnet.",
+ },
+ "description": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Description of the subnet.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "ID of the subnet.",
+ },
+ "ip": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies either an IPv6 address or an IPv4 address.",
+ },
+ "netmask_bits": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the number of netmask bits.",
+ },
+ "netmask_ip4": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the netmask using an IP4 address. The netmask can only be set using netmaskIp4 if the IP address is an IPv4 address.",
+ },
+ "nfs_access": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies whether clients from this subnet can mount using NFS protocol. Protocol access level. 'kDisabled' indicates Protocol access level 'Disabled' 'kReadOnly' indicates Protocol access level 'ReadOnly' 'kReadWrite' indicates Protocol access level 'ReadWrite'.",
+ },
+ "nfs_all_squash": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether all clients from this subnet can map view with view_all_squash_uid/view_all_squash_gid configured in the view.",
+ },
+ "nfs_root_squash": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether clients from this subnet can mount as root on NFS.",
+ },
+ "s3_access": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies whether clients from this subnet can access using S3 protocol. Protocol access level. 'kDisabled' indicates Protocol access level 'Disabled' 'kReadOnly' indicates Protocol access level 'ReadOnly' 'kReadWrite' indicates Protocol access level 'ReadWrite'.",
+ },
+ "smb_access": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies whether clients from this subnet can mount using SMB protocol. Protocol access level. 'kDisabled' indicates Protocol access level 'Disabled' 'kReadOnly' indicates Protocol access level 'ReadOnly' 'kReadWrite' indicates Protocol access level 'ReadWrite'.",
+ },
+ "tenant_id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the unique id of the tenant.",
+ },
+ },
+ },
+ },
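+ // throttling_policy: datastore stream/backup limits and latency thresholds for the registered source.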
+ "throttling_policy": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the throttling policy for a registered Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "enforce_max_streams": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether datastore streams are configured for all datastores that are part of the registered entity. If set to true, number of streams from Cohesity cluster to the registered entity will be limited to the value set for maxConcurrentStreams. If not set or set to false, there is no max limit for the number of concurrent streams.",
+ },
+ "enforce_registered_source_max_backups": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether the number of backups is configured for the registered entity. If set to true, the number of backups made by the Cohesity cluster in the registered entity will be limited to the value set for RegisteredSourceMaxConcurrentBackups. If not set or set to false, there is no max limit for the number of concurrent backups.",
+ },
+ "is_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Indicates whether read operations to the datastores, which are part of the registered Protection Source, are throttled.",
+ },
+ "latency_thresholds": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies latency thresholds that trigger throttling for all datastores found in the registered Protection Source or specific to one datastore.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "active_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, existing backup tasks using the datastore are throttled.",
+ },
+ "new_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, then new backup tasks using the datastore will not be started.",
+ },
+ },
+ },
+ },
+ "max_concurrent_streams": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of streams Cohesity cluster will make concurrently to the datastores of the registered entity. This limit is enforced only when the flag enforceMaxStreams is set to true.",
+ },
+ "nas_source_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the NAS specific source throttling parameters during source registration or during backup of the source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_parallel_metadata_fetch_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during full backup of the source.",
+ },
+ "max_parallel_metadata_fetch_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during incremental backup of the source.",
+ },
+ "max_parallel_read_write_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during full backup of the source.",
+ },
+ "max_parallel_read_write_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during incremental backup of the source.",
+ },
+ },
+ },
+ },
+ "registered_source_max_concurrent_backups": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of backups Cohesity cluster will make concurrently to the registered entity. This limit is enforced only when the flag enforceRegisteredSourceMaxBackups is set to true.",
+ },
+ "storage_array_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Configuration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "storage_array_snapshot_max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max percentage of space that storage array snapshots are allowed to consume per volume/lun.",
+ },
+ },
+ },
+ },
+ "storage_array_snapshot_throttling_policies": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies throttling policies configured for individual volume/lun.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the volume id of the storage array snapshot config.",
+ },
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "max_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Snapshots Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshots": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ "max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max percentage of space that storage array snapshots are allowed to consume per volume/lun.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
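+ // throttling_policy_overrides: per-datastore overrides of the throttling policy above.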
+ "throttling_policy_overrides": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies throttling policy override for a Datastore in a registered entity.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "datastore_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Protection Source id of the Datastore.",
+ },
+ "datastore_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the display name of the Datastore.",
+ },
+ "throttling_policy": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the throttling policy for a registered Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "enforce_max_streams": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether datastore streams are configured for all datastores that are part of the registered entity. If set to true, number of streams from Cohesity cluster to the registered entity will be limited to the value set for maxConcurrentStreams. If not set or set to false, there is no max limit for the number of concurrent streams.",
+ },
+ "enforce_registered_source_max_backups": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether the number of backups is configured for the registered entity. If set to true, the number of backups made by the Cohesity cluster in the registered entity will be limited to the value set for RegisteredSourceMaxConcurrentBackups. If not set or set to false, there is no max limit for the number of concurrent backups.",
+ },
+ "is_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Indicates whether read operations to the datastores, which are part of the registered Protection Source, are throttled.",
+ },
+ "latency_thresholds": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies latency thresholds that trigger throttling for all datastores found in the registered Protection Source or specific to one datastore.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "active_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, existing backup tasks using the datastore are throttled.",
+ },
+ "new_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, then new backup tasks using the datastore will not be started.",
+ },
+ },
+ },
+ },
+ "max_concurrent_streams": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of streams Cohesity cluster will make concurrently to the datastores of the registered entity. This limit is enforced only when the flag enforceMaxStreams is set to true.",
+ },
+ "nas_source_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the NAS specific source throttling parameters during source registration or during backup of the source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_parallel_metadata_fetch_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during full backup of the source.",
+ },
+ "max_parallel_metadata_fetch_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during incremental backup of the source.",
+ },
+ "max_parallel_read_write_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during full backup of the source.",
+ },
+ "max_parallel_read_write_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during incremental backup of the source.",
+ },
+ },
+ },
+ },
+ "registered_source_max_concurrent_backups": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of backups Cohesity cluster will make concurrently to the registered entity. This limit is enforced only when the flag enforceRegisteredSourceMaxBackups is set to true.",
+ },
+ "storage_array_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Configuration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "storage_array_snapshot_max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max percentage of space that storage array snapshots are allowed to consume per volume/lun.",
+ },
+ },
+ },
+ },
+ "storage_array_snapshot_throttling_policies": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies throttling policies configured for individual volume/lun.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the volume id of the storage array snapshot config.",
+ },
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "max_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Snapshots Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshots": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ "max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max percentage of space that storage array snapshots are allowed to consume per volume/lun.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "use_o_auth_for_exchange_online": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether OAuth should be used for authentication in case of Exchange Online.",
+ },
+ "use_vm_bios_uuid": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if registered vCenter is using BIOS UUID to track virtual machines.",
+ },
+ "user_messages": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the additional details encountered during registration. Though the registration may succeed, user messages imply the host environment requires some cleanup or fixing.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "username": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies username to access the target source.",
+ },
+ "vlan_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the VLAN configuration for Recovery.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "vlan": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the VLAN to use for mounting Cohesity's view on the remote host. If specified, Cohesity hostname or the IP address on this VLAN is used.",
+ },
+ "disable_vlan": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether to use the VIPs even when VLANs are configured on the Cluster. If configured, VLAN IP addresses are used by default. If VLANs are not configured, this flag is ignored. Set this flag to true to force using the partition VIPs when VLANs are configured on the Cluster.",
+ },
+ "interface_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the physical interface group name to use for mounting Cohesity's view on the remote host. If specified, Cohesity hostname or the IP address on this VLAN is used.",
+ },
+ },
+ },
+ },
+ "warning_messages": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies a list of warnings encountered during registration. Though the registration may succeed, warning messages imply the host environment requires some cleanup or fixing.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ },
+ },
+ },
+ "source_side_dedup_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether source side dedup is enabled or not.",
+ },
+ "status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the agent status. Specifies the status of the agent running on a physical source.",
+ },
+ "status_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies additional details about the agent status.",
+ },
+ "upgradability": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the upgradability of the agent running on the physical server.",
+ },
+ "upgrade_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the status of the upgrade of the agent on a physical server.",
+ },
+ "upgrade_status_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies detailed message about the agent upgrade failure. This field is not set for successful upgrade.",
+ },
+ "version": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the version of the Agent software.",
+ },
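+ // vol_cbt_info: volume CBT driver version and service state, mirroring file_cbt_info above.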
+ "vol_cbt_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "CBT version and service state info.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "file_version": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Subcomponent version. The interpretation of the version is based on operating system.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "build_ver": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ "major_ver": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ "minor_ver": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ "revision_num": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ },
+ },
+ },
+ "is_installed": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Indicates whether the cbt driver is installed.",
+ },
+ "reboot_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Indicates whether host is rebooted post VolCBT installation.",
+ },
+ "service_state": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Structure to Hold Service Status.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "state": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "cluster_source_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of cluster resource this source represents.",
+ },
+ "host_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the hostname.",
+ },
+ "host_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the environment type for the host.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies an id for an object that is unique across Cohesity Clusters. The id is a composite of all the ids listed below.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "cluster_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Cohesity Cluster id where the object was created.",
+ },
+ "cluster_incarnation_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies an id for the Cohesity Cluster that is generated when a Cohesity Cluster is initially created.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies a unique id assigned to an object (such as a Job) by the Cohesity Cluster.",
+ },
+ },
+ },
+ },
+ "is_proxy_host": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the physical host is a proxy host.",
+ },
+ "memory_size_bytes": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the total memory on the host in bytes.",
+ },
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a human readable name of the Protection Source.",
+ },
+ "networking_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the struct containing information about network addresses configured on the given box. This is needed for dealing with Windows/Oracle Cluster resources that we discover and protect automatically.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "resource_vec": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "The list of resources on the system that are accessible by an IP address.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "endpoints": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "The endpoints by which the resource is accessible.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "fqdn": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The Fully Qualified Domain Name.",
+ },
+ "ipv4_addr": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The IPv4 address.",
+ },
+ "ipv6_addr": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The IPv6 address.",
+ },
+ },
+ },
+ },
+ "type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The type of the resource.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "num_processors": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the number of processors on the host.",
+ },
+ "os_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a human readable name of the OS of the Protection Source.",
+ },
+ "type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of managed Object in a Physical Protection Source. 'kGroup' indicates the EH container.",
+ },
+ "vcs_version": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies cluster version for VCS host.",
+ },
+ "volumes": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Array of Physical Volumes. Specifies the volumes available on the physical host. These fields are populated only for the kPhysicalHost type.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "device_path": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the path to the device that hosts the volume locally.",
+ },
+ "guid": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies an id for the Physical Volume.",
+ },
+ "is_boot_volume": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether the volume is boot volume.",
+ },
+ "is_extended_attributes_supported": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether this volume supports extended attributes (like ACLs) when performing file backups.",
+ },
+ "is_protected": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if a volume is protected by a Job.",
+ },
+ "is_shared_volume": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether the volume is shared volume.",
+ },
+ "label": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a volume label that can be used for displaying additional identifying information about a volume.",
+ },
+ "logical_size_bytes": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the logical size of the volume in bytes that is not reduced by change-block tracking, compression and deduplication.",
+ },
+ "mount_points": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the mount points where the volume is mounted, for example- 'C:', '/mnt/foo' etc.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "mount_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the mount type of the volume, e.g. nfs, autofs, ext4, etc.",
+ },
+ "network_path": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the full path to connect to the network attached volume. For example, (IP or hostname):/path/to/share for NFS volumes.",
+ },
+ "used_size_bytes": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the size used by the volume in bytes.",
+ },
+ },
+ },
+ },
+ "vsswriters": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies VSS writer information about a Physical Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "is_writer_excluded": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "If true, the writer will be excluded by default.",
+ },
+ "writer_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the name of the writer.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
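+ // sql_protection_source: attributes of a SQL Server instance or database Protection Source.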
+ "sql_protection_source": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies an Object representing one SQL Server instance or database.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "is_available_for_vss_backup": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether the database is marked as available for backup according to the SQL Server VSS writer. This may be false if either the state of the databases is not online, or if the VSS writer is not online. This field is set only for type 'kDatabase'.",
+ },
+ "created_timestamp": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the time when the database was created. It is displayed in the timezone of the SQL server on which this database is running.",
+ },
+ "database_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the database name of the SQL Protection Source, if the type is database.",
+ },
+ "db_aag_entity_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the AAG entity id if the database is part of an AAG. This field is set only for type 'kDatabase'.",
+ },
+ "db_aag_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the name of the AAG if the database is part of an AAG. This field is set only for type 'kDatabase'.",
+ },
+ "db_compatibility_level": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the versions of SQL server that the database is compatible with.",
+ },
+ "db_file_groups": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the information about the set of file groups for this db on the host. This is only set if the type is kDatabase.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "db_files": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the last known information about the set of database files on the host. This field is set only for type 'kDatabase'.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "file_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the format type of the file that SQL database stores the data. Specifies the format type of the file that SQL database stores the data. 'kRows' refers to a data file 'kLog' refers to a log file 'kFileStream' refers to a directory containing FILESTREAM data 'kNotSupportedType' is for information purposes only. Not supported. 'kFullText' refers to a full-text catalog.",
+ },
+ "full_path": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the full path of the database file on the SQL host machine.",
+ },
+ "size_bytes": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the last known size of the database file.",
+ },
+ },
+ },
+ },
+ "db_owner_username": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the name of the database owner.",
+ },
+ "default_database_location": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the default path for data files for DBs in an instance.",
+ },
+ "default_log_location": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the default path for log files for DBs in an instance.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies a unique id for a SQL Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "created_date_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies a unique identifier generated from the date the database is created or renamed. Cohesity uses this identifier in combination with the databaseId to uniquely identify a database.",
+ },
+ "database_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies a unique id of the database but only for the life of the database. SQL Server may reuse database ids. Cohesity uses the createDateMsecs in combination with this databaseId to uniquely identify a database.",
+ },
+ "instance_id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies unique id for the SQL Server instance. This id does not change during the life of the instance.",
+ },
+ },
+ },
+ },
+ "is_encrypted": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether the database is TDE enabled.",
+ },
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the instance name of the SQL Protection Source.",
+ },
+ "owner_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the id of the container VM for the SQL Protection Source.",
+ },
+ "recovery_model": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the Recovery Model for the database in SQL environment. Only meaningful for the 'kDatabase' SQL Protection Source. Specifies the Recovery Model set for the Microsoft SQL Server. 'kSimpleRecoveryModel' indicates the Simple SQL Recovery Model which does not utilize log backups. 'kFullRecoveryModel' indicates the Full SQL Recovery Model which requires log backups and allows recovery to a single point in time. 'kBulkLoggedRecoveryModel' indicates the Bulk Logged SQL Recovery Model which requires log backups and allows high-performance bulk copy operations.",
+ },
+ "sql_server_db_state": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The state of the database as returned by SQL Server. Indicates the state of the database. The values correspond to the 'state' field in the system table sys.databases. See https://goo.gl/P66XqM. 'kOnline' indicates that database is in online state. 'kRestoring' indicates that database is in restore state. 'kRecovering' indicates that database is in recovery state. 'kRecoveryPending' indicates that database recovery is in pending state. 'kSuspect' indicates that primary filegroup is suspect and may be damaged. 'kEmergency' indicates that manually forced emergency state. 'kOffline' indicates that database is in offline state. 'kCopying' indicates that database is in copying state. 'kOfflineSecondary' indicates that secondary database is in offline state.",
+ },
+ "sql_server_instance_version": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Server Instance Version.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "build": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the build.",
+ },
+ "major_version": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the major version.",
+ },
+ "minor_version": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the minor version.",
+ },
+ "revision": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the revision.",
+ },
+ "version_string": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the version string.",
+ },
+ },
+ },
+ },
+ "type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of the managed Object in a SQL Protection Source. Examples of SQL Objects include 'kInstance' and 'kDatabase'. 'kInstance' indicates that SQL server instance is being protected. 'kDatabase' indicates that SQL server database is being protected. 'kAAG' indicates that SQL AAG (AlwaysOn Availability Group) is being protected. 'kAAGRootContainer' indicates that SQL AAG's root container is being protected. 'kRootContainer' indicates root container for SQL sources.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
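+ // registration_info: details captured when the source was registered, including access info, throttling settings and registered applications.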
+ "registration_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies information about a registered Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "access_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the parameters required to establish a connection with a particular environment.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "connection_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "ID of the Bifrost (HyX or Rigel) network realm (i.e. a connection) associated with the source.",
+ },
+ "connector_group_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Id of the connector group. Each connector group is collection of Rigel/hyx. Each entity will be tagged with connector group id.",
+ },
+ "endpoint": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specify an IP address or URL of the environment. (such as the IP address of the vCenter Server for a VMware environment).",
+ },
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the environment like VMware, SQL, where the Protection Source exists. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies a Unique id that is generated when the Source is registered. This is a convenience field that is used to maintain an index to different connection params.",
+ },
+ "version": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Version is updated each time the connector parameters are updated. This is used to discard older connector parameters.",
+ },
+ },
+ },
+ },
+ "allowed_ip_addresses": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of IP Addresses on the registered source to be exclusively allowed for doing any type of IO operations.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "authentication_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies an authentication error message. This indicates the given credentials are rejected and the registration of the source is not successful.",
+ },
+ "authentication_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the status of the authenticating to the Protection Source when registering it with Cohesity Cluster.",
+ },
+ "blacklisted_ip_addresses": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "This field is deprecated. Use DeniedIpAddresses instead.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "denied_ip_addresses": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of IP Addresses on the registered source to be denied for doing any type of IO operations.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "environments": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies a list of applications environment that are registered with this Protection Source such as 'kSQL'. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "is_db_authenticated": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if application entity dbAuthenticated or not.",
+ },
+ "is_storage_array_snapshot_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if this source entity has enabled storage array snapshot or not.",
+ },
+ "link_vms_across_vcenter": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the VM linking feature is enabled for this VCenter This means that VMs present in this VCenter which earlier belonged to some other VCenter(also registerd on same cluster) and were migrated, will be linked during EH refresh. This will enable preserving snapshot chains for migrated VMs.",
+ },
+ "minimum_free_space_gb": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minimum free space in GiB of the space expected to be available on the datastore where the virtual disks of the VM being backed up. If the amount of free space(in GiB) is lower than the value given by this field, backup will be aborted. Note that this field is applicable only to 'kVMware' type of environments.",
+ },
+ "minimum_free_space_percent": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minimum free space in percentage of the space expected to be available on the datastore where the virtual disks of the VM being backed up. If the amount of free space(in percentage) is lower than the value given by this field, backup will be aborted. Note that this field is applicable only to 'kVMware' type of environments.",
+ },
+ "password": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies password of the username to access the target source.",
+ },
+ "physical_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the parameters required to register Application Servers running in a Protection Source specific to a physical adapter.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "applications": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the types of applications such as 'kSQL', 'kExchange', 'kAD' running on the Protection Source. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "password": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies password of the username to access the target source.",
+ },
+ "throttling_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the source side throttling configuration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "cpu_throttling_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Configuration Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "fixed_threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Fixed baseline threshold for throttling. This is mandatory for any other throttling type than kNoThrottling.",
+ },
+ "pattern_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Type of the throttling pattern. 'kNoThrottling' indicates that throttling is not in force. 'kBaseThrottling' indicates indicates a constant base level throttling. 'kFixed' indicates a constant base level throttling.",
+ },
+ "throttling_windows": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Window Parameters Definition.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day_time_window": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Window Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "end_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "start_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Throttling threshold applicable in the window.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "network_throttling_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Configuration Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "fixed_threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Fixed baseline threshold for throttling. This is mandatory for any other throttling type than kNoThrottling.",
+ },
+ "pattern_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Type of the throttling pattern. 'kNoThrottling' indicates that throttling is not in force. 'kBaseThrottling' indicates indicates a constant base level throttling. 'kFixed' indicates a constant base level throttling.",
+ },
+ "throttling_windows": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Window Parameters Definition.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day_time_window": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Window Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "end_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "start_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Throttling threshold applicable in the window.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "username": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies username to access the target source.",
+ },
+ },
+ },
+ },
+ "progress_monitor_path": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Captures the current progress and pulse details w.r.t to either the registration or refresh.",
+ },
+ "refresh_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a message if there was any error encountered during the last rebuild of the Protection Source tree. If there was no error during the last rebuild, this field is reset.",
+ },
+ "refresh_time_usecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Unix epoch time (in microseconds) when the Protection Source tree was most recently fetched and built.",
+ },
+ "registered_apps_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies information of the applications registered on this protection source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "authentication_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "pecifies an authentication error message. This indicates the given credentials are rejected and the registration of the application is not successful.",
+ },
+ "authentication_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the status of authenticating to the Protection Source when registering this application with Cohesity Cluster. If the status is 'kFinished' and there is no error, registration is successful. Specifies the status of the authentication during the registration of a Protection Source. 'kPending' indicates the authentication is in progress. 'kScheduled' indicates the authentication is scheduled. 'kFinished' indicates the authentication is completed. 'kRefreshInProgress' indicates the refresh is in progress.",
+ },
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the application environment. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ },
+ "host_settings_check_results": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of check results internally performed to verify status of various services such as 'AgnetRunning', 'SQLWriterRunning' etc.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "check_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of the check internally performed. Specifies the type of the host check performed internally. 'kIsAgentPortAccessible' indicates the check for agent port access. 'kIsAgentRunning' indicates the status for the Cohesity agent service. 'kIsSQLWriterRunning' indicates the status for SQLWriter service. 'kAreSQLInstancesRunning' indicates the run status for all the SQL instances in the host. 'kCheckServiceLoginsConfig' checks the privileges and sysadmin status of the logins used by the SQL instance services, Cohesity agent service and the SQLWriter service. 'kCheckSQLFCIVIP' checks whether the SQL FCI is registered with a valid VIP or FQDN. 'kCheckSQLDiskSpace' checks whether volumes containing SQL DBs have at least 10% free space.",
+ },
+ "result_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of the result returned after performing the internal host check. Specifies the type of the host check result performed internally. 'kPass' indicates that the respective check was successful. 'kFail' indicates that the respective check failed as some mandatory setting is not met 'kWarning' indicates that the respective check has warning as certain non-mandatory setting is not met.",
+ },
+ "user_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a descriptive message for failed/warning types.",
+ },
+ },
+ },
+ },
+ "refresh_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a message if there was any error encountered during the last rebuild of the application tree. If there was no error during the last rebuild, this field is reset.",
+ },
+ },
+ },
+ },
+ "registration_time_usecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Unix epoch time (in microseconds) when the Protection Source was registered.",
+ },
+ "subnets": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of subnets added during creation or updation of vmare source. Currently, this field will only be populated in case of VMware registration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "component": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Component that has reserved the subnet.",
+ },
+ "description": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Description of the subnet.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "ID of the subnet.",
+ },
+ "ip": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies either an IPv6 address or an IPv4 address.",
+ },
+ "netmask_bits": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "netmaskBits.",
+ },
+ "netmask_ip4": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the netmask using an IP4 address. The netmask can only be set using netmaskIp4 if the IP address is an IPv4 address.",
+ },
+ "nfs_access": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Component that has reserved the subnet.",
+ },
+ "nfs_all_squash": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether all clients from this subnet can map view with view_all_squash_uid/view_all_squash_gid configured in the view.",
+ },
+ "nfs_root_squash": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether clients from this subnet can mount as root on NFS.",
+ },
+ "s3_access": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies whether clients from this subnet can access using S3 protocol. Protocol access level. 'kDisabled' indicates Protocol access level 'Disabled' 'kReadOnly' indicates Protocol access level 'ReadOnly' 'kReadWrite' indicates Protocol access level 'ReadWrite'.",
+ },
+ "smb_access": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies whether clients from this subnet can mount using SMB protocol. Protocol access level. 'kDisabled' indicates Protocol access level 'Disabled' 'kReadOnly' indicates Protocol access level 'ReadOnly' 'kReadWrite' indicates Protocol access level 'ReadWrite'.",
+ },
+ "tenant_id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the unique id of the tenant.",
+ },
+ },
+ },
+ },
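+ // throttling_policy: datastore latency thresholds and stream/backup limits for the registered source.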
+ "throttling_policy": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the throttling policy for a registered Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "enforce_max_streams": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether datastore streams are configured for all datastores that are part of the registered entity. If set to true, number of streams from Cohesity cluster to the registered entity will be limited to the value set for maxConcurrentStreams. If not set or set to false, there is no max limit for the number of concurrent streams.",
+ },
+ "enforce_registered_source_max_backups": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether no. of backups are configured for the registered entity. If set to true, number of backups made by Cohesity cluster in the registered entity will be limited to the value set for RegisteredSourceMaxConcurrentBackups. If not set or set to false, there is no max limit for the number of concurrent backups.",
+ },
+ "is_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Indicates whether read operations to the datastores, which are part of the registered Protection Source, are throttled.",
+ },
+ "latency_thresholds": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies latency thresholds that trigger throttling for all datastores found in the registered Protection Source or specific to one datastore.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "active_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, existing backup tasks using the datastore are throttled.",
+ },
+ "new_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, then new backup tasks using the datastore will not be started.",
+ },
+ },
+ },
+ },
+ "max_concurrent_streams": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of streams Cohesity cluster will make concurrently to the datastores of the registered entity. This limit is enforced only when the flag enforceMaxStreams is set to true.",
+ },
+ "nas_source_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the NAS specific source throttling parameters during source registration or during backup of the source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_parallel_metadata_fetch_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during full backup of the source.",
+ },
+ "max_parallel_metadata_fetch_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during incremental backup of the source.",
+ },
+ "max_parallel_read_write_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during full backup of the source.",
+ },
+ "max_parallel_read_write_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during incremental backup of the source.",
+ },
+ },
+ },
+ },
+ "registered_source_max_concurrent_backups": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of backups Cohesity cluster will make concurrently to the registered entity. This limit is enforced only when the flag enforceRegisteredSourceMaxBackups is set to true.",
+ },
+ "storage_array_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Configuration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "storage_array_snapshot_max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ "storage_array_snapshot_throttling_policies": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies throttling policies configured for individual volume/lun.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the volume id of the storage array snapshot config.",
+ },
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "max_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Snapshots Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshots": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ "max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
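+ // throttling_policy_overrides: per-datastore overrides of the throttling policy defined above.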
+ "throttling_policy_overrides": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies throttling policy override for a Datastore in a registered entity.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "datastore_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Protection Source id of the Datastore.",
+ },
+ "datastore_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the display name of the Datastore.",
+ },
+ "throttling_policy": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the throttling policy for a registered Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "enforce_max_streams": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether datastore streams are configured for all datastores that are part of the registered entity. If set to true, number of streams from Cohesity cluster to the registered entity will be limited to the value set for maxConcurrentStreams. If not set or set to false, there is no max limit for the number of concurrent streams.",
+ },
+ "enforce_registered_source_max_backups": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether no. of backups are configured for the registered entity. If set to true, number of backups made by Cohesity cluster in the registered entity will be limited to the value set for RegisteredSourceMaxConcurrentBackups. If not set or set to false, there is no max limit for the number of concurrent backups.",
+ },
+ "is_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Indicates whether read operations to the datastores, which are part of the registered Protection Source, are throttled.",
+ },
+ "latency_thresholds": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies latency thresholds that trigger throttling for all datastores found in the registered Protection Source or specific to one datastore.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "active_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, existing backup tasks using the datastore are throttled.",
+ },
+ "new_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, then new backup tasks using the datastore will not be started.",
+ },
+ },
+ },
+ },
+ "max_concurrent_streams": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of streams Cohesity cluster will make concurrently to the datastores of the registered entity. This limit is enforced only when the flag enforceMaxStreams is set to true.",
+ },
+ "nas_source_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the NAS specific source throttling parameters during source registration or during backup of the source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_parallel_metadata_fetch_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during full backup of the source.",
+ },
+ "max_parallel_metadata_fetch_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during incremental backup of the source.",
+ },
+ "max_parallel_read_write_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during full backup of the source.",
+ },
+ "max_parallel_read_write_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during incremental backup of the source.",
+ },
+ },
+ },
+ },
+ "registered_source_max_concurrent_backups": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of backups Cohesity cluster will make concurrently to the registered entity. This limit is enforced only when the flag enforceRegisteredSourceMaxBackups is set to true.",
+ },
+ "storage_array_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Configuration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "storage_array_snapshot_max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ "storage_array_snapshot_throttling_policies": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies throttling policies configured for individual volume/lun.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the volume id of the storage array snapshot config.",
+ },
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "max_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Snapshots Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshots": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ "max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "use_o_auth_for_exchange_online": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether OAuth should be used for authentication in case of Exchange Online.",
+ },
+ "use_vm_bios_uuid": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if registered vCenter is using BIOS UUID to track virtual machines.",
+ },
+ "user_messages": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the additional details encountered during registration. Though the registration may succeed, user messages imply the host environment requires some cleanup or fixing.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "username": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies username to access the target source.",
+ },
+ "vlan_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the VLAN configuration for Recovery.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "vlan": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the VLAN to use for mounting Cohesity's view on the remote host. If specified, Cohesity hostname or the IP address on this VLAN is used.",
+ },
+ "disable_vlan": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether to use the VIPs even when VLANs are configured on the Cluster. If configured, VLAN IP addresses are used by default. If VLANs are not configured, this flag is ignored. Set this flag to true to force using the partition VIPs when VLANs are configured on the Cluster.",
+ },
+ "interface_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the physical interface group name to use for mounting Cohesity's view on the remote host. If specified, Cohesity hostname or the IP address on this VLAN is used.",
+ },
+ },
+ },
+ },
+ "warning_messages": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies a list of warnings encountered during registration. Though the registration may succeed, warning messages imply the host environment requires some cleanup or fixing.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ },
+ },
+ },
+ "total_downtiered_size_in_bytes": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the total bytes downtiered from the source so far.",
+ },
+ "total_uptiered_size_in_bytes": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the total bytes uptiered to the source so far.",
+ },
+ "unprotected_sources_summary": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Aggregated information about a node subtree.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the environment such as 'kSQL' or 'kVMware', where the Protection Source exists. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc. NOTE 'kPuppeteer' refers to Cohesity's Remote Adapter. 'kVMware' indicates the VMware Protection Source environment. 'kHyperV' indicates the HyperV Protection Source environment. 'kSQL' indicates the SQL Protection Source environment. 'kView' indicates the View Protection Source environment. 'kPuppeteer' indicates the Cohesity's Remote Adapter. 'kPhysical' indicates the physical Protection Source environment. 'kPure' indicates the Pure Storage Protection Source environment. 'kNimble' indicates the Nimble Storage Protection Source environment. 'kAzure' indicates the Microsoft's Azure Protection Source environment. 'kNetapp' indicates the Netapp Protection Source environment. 'kAgent' indicates the Agent Protection Source environment. 'kGenericNas' indicates the Generic Network Attached Storage Protection Source environment. 'kAcropolis' indicates the Acropolis Protection Source environment. 'kPhysicalFiles' indicates the Physical Files Protection Source environment. 'kIbmFlashSystem' indicates the IBM Flash System Protection Source environment. 'kIsilon' indicates the Dell EMC's Isilon Protection Source environment. 'kGPFS' indicates IBM's GPFS Protection Source environment. 'kKVM' indicates the KVM Protection Source environment. 'kAWS' indicates the AWS Protection Source environment. 'kExchange' indicates the Exchange Protection Source environment. 'kHyperVVSS' indicates the HyperV VSS Protection Source environment. 'kOracle' indicates the Oracle Protection Source environment. 'kGCP' indicates the Google Cloud Platform Protection Source environment. 'kFlashBlade' indicates the Flash Blade Protection Source environment. 'kAWSNative' indicates the AWS Native Protection Source environment. 'kO365' indicates the Office 365 Protection Source environment. 'kO365Outlook' indicates Office 365 outlook Protection Source environment. 'kHyperFlex' indicates the Hyper Flex Protection Source environment. 'kGCPNative' indicates the GCP Native Protection Source environment. 'kAzureNative' indicates the Azure Native Protection Source environment. 'kKubernetes' indicates a Kubernetes Protection Source environment. 'kElastifile' indicates Elastifile Protection Source environment. 'kAD' indicates Active Directory Protection Source environment. 'kRDSSnapshotManager' indicates AWS RDS Protection Source environment. 'kCassandra' indicates Cassandra Protection Source environment. 'kMongoDB' indicates MongoDB Protection Source environment. 'kCouchbase' indicates Couchbase Protection Source environment. 'kHdfs' indicates Hdfs Protection Source environment. 'kHive' indicates Hive Protection Source environment. 'kHBase' indicates HBase Protection Source environment. 'kUDA' indicates Universal Data Adapter Protection Source environment. 'kO365Teams' indicates the Office365 Teams Protection Source environment. 'kO365Group' indicates the Office365 Groups Protection Source environment. 'kO365Exchange' indicates the Office365 Mailbox Protection Source environment. 'kO365OneDrive' indicates the Office365 OneDrive Protection Source environment. 'kO365Sharepoint' indicates the Office365 SharePoint Protection Source environment. 'kO365PublicFolders' indicates the Office365 PublicFolders Protection Source environment.",
+ },
+ "leaves_count": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the number of leaf nodes under the subtree of this node.",
+ },
+ "total_logical_size": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the total logical size of the data under the subtree of this node.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
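+ // entity_pagination_parameters: cursor-based pagination settings for children of this source.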
+ "entity_pagination_parameters": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the cursor based pagination parameters for Protection Source and its children. Pagination is supported at a given level within the Protection Source Hierarchy with the help of before or after cursors. A Cursor will always refer to a specific source within the source dataset but will be invalidated if the item is removed.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "after_cursor_entity_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the entity id starting from which the items are to be returned.",
+ },
+ "before_cursor_entity_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the entity id upto which the items are to be returned.",
+ },
+ "node_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the entity id for the Node at any level within the Source entity hierarchy whose children are to be paginated.",
+ },
+ "page_size": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the maximum number of entities to be returned within the page.",
+ },
+ },
+ },
+ },
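+ // entity_permission_info: users, groups and tenant permitted to access each entity.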
+ "entity_permission_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the permission information of entities.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "entity_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the entity id.",
+ },
+ "groups": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies struct with basic group details.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "domain": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies domain name of the user.",
+ },
+ "group_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies group name of the group.",
+ },
+ "sid": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies unique Security ID (SID) of the user.",
+ },
+ "tenant_ids": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the tenants to which the group belongs to.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ },
+ },
+ },
+ "is_inferred": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether the Entity Permission Information is inferred or not. For example, SQL application hosted over vCenter will have inferred entity permission information.",
+ },
+ "is_registered_by_sp": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether this entity is registered by the SP or not. This will be populated only if the entity is a root entity. Refer to magneto/base/permissions.proto for details.",
+ },
+ "registering_tenant_id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the tenant id that registered this entity. This will be populated only if the entity is a root entity.",
+ },
+ "tenant": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies struct with basic tenant details.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "bifrost_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if this tenant is bifrost enabled or not.",
+ },
+ "is_managed_on_helios": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether this tenant is manged on helios.",
+ },
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies name of the tenant.",
+ },
+ "tenant_id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the unique id of the tenant.",
+ },
+ },
+ },
+ },
+ "users": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies struct with basic user details.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "domain": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies domain name of the user.",
+ },
+ "sid": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies unique Security ID (SID) of the user.",
+ },
+ "tenant_id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the tenant to which the user belongs to.",
+ },
+ "user_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies user name of the user.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
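+ // nodes: children of this node in the Protection Sources hierarchy; the nested copy below repeats one level and then terminates with empty schemas.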
+ "nodes": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies children of the current node in the Protection Sources hierarchy. When representing Objects in memory, the entire Object subtree hierarchy is represented. You can use this subtree to navigate down the Object hierarchy.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "application_nodes": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the child subtree used to store additional application-level Objects. Different environments use the subtree to store application-level information. For example for SQL Server, this subtree stores the SQL Server instances running on a VM.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "nodes": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies children of the current node in the Protection Sources hierarchy.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "application_nodes": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the child subtree used to store additional application-level Objects. Different environments use the subtree to store application-level information. For example for SQL Server, this subtree stores the SQL Server instances running on a VM.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{},
+ },
+ },
+ "entity_pagination_parameters": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the cursor based pagination parameters for Protection Source and its children. Pagination is supported at a given level within the Protection Source Hierarchy with the help of before or after cursors. A Cursor will always refer to a specific source within the source dataset but will be invalidated if the item is removed.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "after_cursor_entity_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the entity id starting from which the items are to be returned.",
+ },
+ "before_cursor_entity_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the entity id upto which the items are to be returned.",
+ },
+ "node_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the entity id for the Node at any level within the Source entity hierarchy whose children are to be paginated.",
+ },
+ "page_size": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the maximum number of entities to be returned within the page.",
+ },
+ },
+ },
+ },
+ "entity_permission_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the permission information of entities.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "entity_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the entity id.",
+ },
+ "groups": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies struct with basic group details.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "domain": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies domain name of the user.",
+ },
+ "group_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies group name of the group.",
+ },
+ "sid": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies unique Security ID (SID) of the user.",
+ },
+ "tenant_ids": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the tenants to which the group belongs to.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ },
+ },
+ },
+ "is_inferred": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether the Entity Permission Information is inferred or not. For example, SQL application hosted over vCenter will have inferred entity permission information.",
+ },
+ "is_registered_by_sp": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether this entity is registered by the SP or not. This will be populated only if the entity is a root entity. Refer to magneto/base/permissions.proto for details.",
+ },
+ "registering_tenant_id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the tenant id that registered this entity. This will be populated only if the entity is a root entity.",
+ },
+ "tenant": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies struct with basic tenant details.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "bifrost_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if this tenant is bifrost enabled or not.",
+ },
+ "is_managed_on_helios": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether this tenant is manged on helios.",
+ },
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies name of the tenant.",
+ },
+ "tenant_id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the unique id of the tenant.",
+ },
+ },
+ },
+ },
+ "users": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies struct with basic user details.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "domain": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies domain name of the user.",
+ },
+ "sid": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies unique Security ID (SID) of the user.",
+ },
+ "tenant_id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the tenant to which the user belongs to.",
+ },
+ "user_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies user name of the user.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "logical_size": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the logical size of the data in bytes for the Object on this node. Presence of this field indicates this node is a leaf node.",
+ },
+ "nodes": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies children of the current node in the Protection Sources hierarchy. When representing Objects in memory, the entire Object subtree hierarchy is represented. You can use this subtree to navigate down the Object hierarchy.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{},
+ },
+ },
+ "object_protection_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Object Protection Info of the Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "auto_protect_parent_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the auto protect parent id if this entity is protected based on auto protection. This is only specified for leaf entities.",
+ },
+ "entity_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the entity id.",
+ },
+ "has_active_object_protection_spec": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies if the entity is under object protection.",
+ },
+ },
+ },
+ },
+ "protected_sources_summary": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Array of Protected Objects. Specifies aggregated information about all the child Objects of this node that are currently protected by a Protection Job. There is one entry for each environment that is being backed up. The aggregated information for the Object hierarchy's environment will be available at the 0th index of the vector.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the environment such as 'kSQL' or 'kVMware', where the Protection Source exists. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc. NOTE 'kPuppeteer' refers to Cohesity's Remote Adapter. 'kVMware' indicates the VMware Protection Source environment. 'kHyperV' indicates the HyperV Protection Source environment. 'kSQL' indicates the SQL Protection Source environment. 'kView' indicates the View Protection Source environment. 'kPuppeteer' indicates the Cohesity's Remote Adapter. 'kPhysical' indicates the physical Protection Source environment. 'kPure' indicates the Pure Storage Protection Source environment. 'kNimble' indicates the Nimble Storage Protection Source environment. 'kAzure' indicates the Microsoft's Azure Protection Source environment. 'kNetapp' indicates the Netapp Protection Source environment. 'kAgent' indicates the Agent Protection Source environment. 'kGenericNas' indicates the Generic Network Attached Storage Protection Source environment. 'kAcropolis' indicates the Acropolis Protection Source environment. 'kPhysicalFiles' indicates the Physical Files Protection Source environment. 'kIbmFlashSystem' indicates the IBM Flash System Protection Source environment. 'kIsilon' indicates the Dell EMC's Isilon Protection Source environment. 'kGPFS' indicates IBM's GPFS Protection Source environment. 'kKVM' indicates the KVM Protection Source environment. 'kAWS' indicates the AWS Protection Source environment. 'kExchange' indicates the Exchange Protection Source environment. 'kHyperVVSS' indicates the HyperV VSS Protection Source environment. 'kOracle' indicates the Oracle Protection Source environment. 'kGCP' indicates the Google Cloud Platform Protection Source environment. 'kFlashBlade' indicates the Flash Blade Protection Source environment. 'kAWSNative' indicates the AWS Native Protection Source environment. 'kO365' indicates the Office 365 Protection Source environment. 'kO365Outlook' indicates Office 365 outlook Protection Source environment. 'kHyperFlex' indicates the Hyper Flex Protection Source environment. 'kGCPNative' indicates the GCP Native Protection Source environment. 'kAzureNative' indicates the Azure Native Protection Source environment. 'kKubernetes' indicates a Kubernetes Protection Source environment. 'kElastifile' indicates Elastifile Protection Source environment. 'kAD' indicates Active Directory Protection Source environment. 'kRDSSnapshotManager' indicates AWS RDS Protection Source environment. 'kCassandra' indicates Cassandra Protection Source environment. 'kMongoDB' indicates MongoDB Protection Source environment. 'kCouchbase' indicates Couchbase Protection Source environment. 'kHdfs' indicates Hdfs Protection Source environment. 'kHive' indicates Hive Protection Source environment. 'kHBase' indicates HBase Protection Source environment. 'kUDA' indicates Universal Data Adapter Protection Source environment. 'kO365Teams' indicates the Office365 Teams Protection Source environment. 'kO365Group' indicates the Office365 Groups Protection Source environment. 'kO365Exchange' indicates the Office365 Mailbox Protection Source environment. 'kO365OneDrive' indicates the Office365 OneDrive Protection Source environment. 'kO365Sharepoint' indicates the Office365 SharePoint Protection Source environment. 'kO365PublicFolders' indicates the Office365 PublicFolders Protection Source environment.",
+ },
+ "leaves_count": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the number of leaf nodes under the subtree of this node.",
+ },
+ "total_logical_size": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the total logical size of the data under the subtree of this node.",
+ },
+ },
+ },
+ },
+ "protection_source": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies details about an Acropolis Protection Source when the environment is set to 'kAcropolis'.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "connection_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the connection id of the tenant.",
+ },
+ "connector_group_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the connector group id of the connector groups.",
+ },
+ "custom_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the user provided custom name of the Protection Source.",
+ },
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the environment (such as 'kVMware' or 'kSQL') where the Protection Source exists. Depending on the environment, one of the following Protection Sources are initialized.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies an id of the Protection Source.",
+ },
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a name of the Protection Source.",
+ },
+ "parent_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies an id of the parent of the Protection Source.",
+ },
+ "physical_protection_source": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies a Protection Source in a Physical environment.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "agents": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifiles the agents running on the Physical Protection Source and the status information.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "cbmr_version": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the version if Cristie BMR product is installed on the host.",
+ },
+ "file_cbt_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "CBT version and service state info.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "file_version": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Subcomponent version. The interpretation of the version is based on operating system.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "build_ver": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ "major_ver": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ "minor_ver": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ "revision_num": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ },
+ },
+ },
+ "is_installed": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Indicates whether the cbt driver is installed.",
+ },
+ "reboot_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Indicates whether host is rebooted post VolCBT installation.",
+ },
+ "service_state": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Structure to Hold Service Status.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "state": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "host_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the host type where the agent is running. This is only set for persistent agents.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the agent's id.",
+ },
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the agent's name.",
+ },
+ "oracle_multi_node_channel_supported": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether oracle multi node multi channel is supported or not.",
+ },
+ "registration_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies information about a registered Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "access_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the parameters required to establish a connection with a particular environment.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "connection_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "ID of the Bifrost (HyX or Rigel) network realm (i.e. a connection) associated with the source.",
+ },
+ "connector_group_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Id of the connector group. Each connector group is collection of Rigel/hyx. Each entity will be tagged with connector group id.",
+ },
+ "endpoint": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specify an IP address or URL of the environment. (such as the IP address of the vCenter Server for a VMware environment).",
+ },
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the environment like VMware, SQL, where the Protection Source exists. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies a Unique id that is generated when the Source is registered. This is a convenience field that is used to maintain an index to different connection params.",
+ },
+ "version": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Version is updated each time the connector parameters are updated. This is used to discard older connector parameters.",
+ },
+ },
+ },
+ },
+ "allowed_ip_addresses": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of IP Addresses on the registered source to be exclusively allowed for doing any type of IO operations.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "authentication_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies an authentication error message. This indicates the given credentials are rejected and the registration of the source is not successful.",
+ },
+ "authentication_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the status of the authenticating to the Protection Source when registering it with Cohesity Cluster.",
+ },
+ "blacklisted_ip_addresses": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "This field is deprecated. Use DeniedIpAddresses instead.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "denied_ip_addresses": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of IP Addresses on the registered source to be denied for doing any type of IO operations.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "environments": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies a list of applications environment that are registered with this Protection Source such as 'kSQL'. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "is_db_authenticated": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if application entity dbAuthenticated or not.",
+ },
+ "is_storage_array_snapshot_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if this source entity has enabled storage array snapshot or not.",
+ },
+ "link_vms_across_vcenter": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the VM linking feature is enabled for this VCenter This means that VMs present in this VCenter which earlier belonged to some other VCenter(also registerd on same cluster) and were migrated, will be linked during EH refresh. This will enable preserving snapshot chains for migrated VMs.",
+ },
+ "minimum_free_space_gb": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minimum free space in GiB of the space expected to be available on the datastore where the virtual disks of the VM being backed up. If the amount of free space(in GiB) is lower than the value given by this field, backup will be aborted. Note that this field is applicable only to 'kVMware' type of environments.",
+ },
+ "minimum_free_space_percent": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minimum free space in percentage of the space expected to be available on the datastore where the virtual disks of the VM being backed up. If the amount of free space(in percentage) is lower than the value given by this field, backup will be aborted. Note that this field is applicable only to 'kVMware' type of environments.",
+ },
+ "password": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies password of the username to access the target source.",
+ },
+ "physical_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the parameters required to register Application Servers running in a Protection Source specific to a physical adapter.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "applications": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the types of applications such as 'kSQL', 'kExchange', 'kAD' running on the Protection Source. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "password": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies password of the username to access the target source.",
+ },
+ "throttling_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the source side throttling configuration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "cpu_throttling_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Configuration Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "fixed_threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Fixed baseline threshold for throttling. This is mandatory for any other throttling type than kNoThrottling.",
+ },
+ "pattern_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Type of the throttling pattern. 'kNoThrottling' indicates that throttling is not in force. 'kBaseThrottling' indicates indicates a constant base level throttling. 'kFixed' indicates a constant base level throttling.",
+ },
+ "throttling_windows": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Window Parameters Definition.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day_time_window": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Window Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "end_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "start_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Throttling threshold applicable in the window.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
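+ // Both cpu_throttling_config and network_throttling_config (below) share the same
+ // throttling window shape. A hypothetical window throttling Monday 09:00-17:00 to a
+ // threshold of 50 would surface in state roughly as:
+ //   throttling_windows = [{
+ //     day_time_window = [{
+ //       start_time = [{ day = "kMonday", time = [{ hour = 9, minute = 0 }] }]
+ //       end_time   = [{ day = "kMonday", time = [{ hour = 17, minute = 0 }] }]
+ //     }]
+ //     threshold = 50
+ //   }]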
+ "network_throttling_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Configuration Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "fixed_threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Fixed baseline threshold for throttling. This is mandatory for any other throttling type than kNoThrottling.",
+ },
+ "pattern_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Type of the throttling pattern. 'kNoThrottling' indicates that throttling is not in force. 'kBaseThrottling' indicates indicates a constant base level throttling. 'kFixed' indicates a constant base level throttling.",
+ },
+ "throttling_windows": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Window Parameters Definition.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day_time_window": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Window Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "end_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "start_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Throttling threshold applicable in the window.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "username": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies username to access the target source.",
+ },
+ },
+ },
+ },
+ "progress_monitor_path": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Captures the current progress and pulse details w.r.t to either the registration or refresh.",
+ },
+ "refresh_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a message if there was any error encountered during the last rebuild of the Protection Source tree. If there was no error during the last rebuild, this field is reset.",
+ },
+ "refresh_time_usecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Unix epoch time (in microseconds) when the Protection Source tree was most recently fetched and built.",
+ },
+ "registered_apps_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies information of the applications registered on this protection source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "authentication_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "pecifies an authentication error message. This indicates the given credentials are rejected and the registration of the application is not successful.",
+ },
+ "authentication_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the status of authenticating to the Protection Source when registering this application with Cohesity Cluster. If the status is 'kFinished' and there is no error, registration is successful. Specifies the status of the authentication during the registration of a Protection Source. 'kPending' indicates the authentication is in progress. 'kScheduled' indicates the authentication is scheduled. 'kFinished' indicates the authentication is completed. 'kRefreshInProgress' indicates the refresh is in progress.",
+ },
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the application environment. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ },
+ "host_settings_check_results": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of check results internally performed to verify status of various services such as 'AgnetRunning', 'SQLWriterRunning' etc.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "check_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of the check internally performed. Specifies the type of the host check performed internally. 'kIsAgentPortAccessible' indicates the check for agent port access. 'kIsAgentRunning' indicates the status for the Cohesity agent service. 'kIsSQLWriterRunning' indicates the status for SQLWriter service. 'kAreSQLInstancesRunning' indicates the run status for all the SQL instances in the host. 'kCheckServiceLoginsConfig' checks the privileges and sysadmin status of the logins used by the SQL instance services, Cohesity agent service and the SQLWriter service. 'kCheckSQLFCIVIP' checks whether the SQL FCI is registered with a valid VIP or FQDN. 'kCheckSQLDiskSpace' checks whether volumes containing SQL DBs have at least 10% free space.",
+ },
+ "result_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of the result returned after performing the internal host check. Specifies the type of the host check result performed internally. 'kPass' indicates that the respective check was successful. 'kFail' indicates that the respective check failed as some mandatory setting is not met 'kWarning' indicates that the respective check has warning as certain non-mandatory setting is not met.",
+ },
+ "user_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a descriptive message for failed/warning types.",
+ },
+ },
+ },
+ },
+ "refresh_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a message if there was any error encountered during the last rebuild of the application tree. If there was no error during the last rebuild, this field is reset.",
+ },
+ },
+ },
+ },
+ "registration_time_usecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Unix epoch time (in microseconds) when the Protection Source was registered.",
+ },
+ "subnets": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of subnets added during creation or updation of vmare source. Currently, this field will only be populated in case of VMware registration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "component": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Component that has reserved the subnet.",
+ },
+ "description": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Description of the subnet.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "ID of the subnet.",
+ },
+ "ip": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies either an IPv6 address or an IPv4 address.",
+ },
+ "netmask_bits": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "netmaskBits.",
+ },
+ "netmask_ip4": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the netmask using an IP4 address. The netmask can only be set using netmaskIp4 if the IP address is an IPv4 address.",
+ },
+ "nfs_access": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Component that has reserved the subnet.",
+ },
+ "nfs_all_squash": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether all clients from this subnet can map view with view_all_squash_uid/view_all_squash_gid configured in the view.",
+ },
+ "nfs_root_squash": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether clients from this subnet can mount as root on NFS.",
+ },
+ "s3_access": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies whether clients from this subnet can access using S3 protocol. Protocol access level. 'kDisabled' indicates Protocol access level 'Disabled' 'kReadOnly' indicates Protocol access level 'ReadOnly' 'kReadWrite' indicates Protocol access level 'ReadWrite'.",
+ },
+ "smb_access": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies whether clients from this subnet can mount using SMB protocol. Protocol access level. 'kDisabled' indicates Protocol access level 'Disabled' 'kReadOnly' indicates Protocol access level 'ReadOnly' 'kReadWrite' indicates Protocol access level 'ReadWrite'.",
+ },
+ "tenant_id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the unique id of the tenant.",
+ },
+ },
+ },
+ },
+ "throttling_policy": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the throttling policy for a registered Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "enforce_max_streams": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether datastore streams are configured for all datastores that are part of the registered entity. If set to true, number of streams from Cohesity cluster to the registered entity will be limited to the value set for maxConcurrentStreams. If not set or set to false, there is no max limit for the number of concurrent streams.",
+ },
+ "enforce_registered_source_max_backups": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether no. of backups are configured for the registered entity. If set to true, number of backups made by Cohesity cluster in the registered entity will be limited to the value set for RegisteredSourceMaxConcurrentBackups. If not set or set to false, there is no max limit for the number of concurrent backups.",
+ },
+ "is_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Indicates whether read operations to the datastores, which are part of the registered Protection Source, are throttled.",
+ },
+ "latency_thresholds": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies latency thresholds that trigger throttling for all datastores found in the registered Protection Source or specific to one datastore.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "active_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, existing backup tasks using the datastore are throttled.",
+ },
+ "new_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, then new backup tasks using the datastore will not be started.",
+ },
+ },
+ },
+ },
+ "max_concurrent_streams": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of streams Cohesity cluster will make concurrently to the datastores of the registered entity. This limit is enforced only when the flag enforceMaxStreams is set to true.",
+ },
+ "nas_source_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the NAS specific source throttling parameters during source registration or during backup of the source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_parallel_metadata_fetch_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during full backup of the source.",
+ },
+ "max_parallel_metadata_fetch_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during incremental backup of the source.",
+ },
+ "max_parallel_read_write_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during full backup of the source.",
+ },
+ "max_parallel_read_write_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during incremental backup of the source.",
+ },
+ },
+ },
+ },
+ "registered_source_max_concurrent_backups": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of backups Cohesity cluster will make concurrently to the registered entity. This limit is enforced only when the flag enforceRegisteredSourceMaxBackups is set to true.",
+ },
+ "storage_array_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Configuration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "storage_array_snapshot_max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ "storage_array_snapshot_throttling_policies": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies throttling policies configured for individual volume/lun.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the volume id of the storage array snapshot config.",
+ },
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "max_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Snapshots Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshots": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ "max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "throttling_policy_overrides": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies throttling policy override for a Datastore in a registered entity.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "datastore_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Protection Source id of the Datastore.",
+ },
+ "datastore_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the display name of the Datastore.",
+ },
+ "throttling_policy": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the throttling policy for a registered Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "enforce_max_streams": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether datastore streams are configured for all datastores that are part of the registered entity. If set to true, number of streams from Cohesity cluster to the registered entity will be limited to the value set for maxConcurrentStreams. If not set or set to false, there is no max limit for the number of concurrent streams.",
+ },
+ "enforce_registered_source_max_backups": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether no. of backups are configured for the registered entity. If set to true, number of backups made by Cohesity cluster in the registered entity will be limited to the value set for RegisteredSourceMaxConcurrentBackups. If not set or set to false, there is no max limit for the number of concurrent backups.",
+ },
+ "is_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Indicates whether read operations to the datastores, which are part of the registered Protection Source, are throttled.",
+ },
+ "latency_thresholds": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies latency thresholds that trigger throttling for all datastores found in the registered Protection Source or specific to one datastore.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "active_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, existing backup tasks using the datastore are throttled.",
+ },
+ "new_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, then new backup tasks using the datastore will not be started.",
+ },
+ },
+ },
+ },
+ "max_concurrent_streams": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of streams Cohesity cluster will make concurrently to the datastores of the registered entity. This limit is enforced only when the flag enforceMaxStreams is set to true.",
+ },
+ "nas_source_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the NAS specific source throttling parameters during source registration or during backup of the source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_parallel_metadata_fetch_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during full backup of the source.",
+ },
+ "max_parallel_metadata_fetch_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during incremental backup of the source.",
+ },
+ "max_parallel_read_write_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during full backup of the source.",
+ },
+ "max_parallel_read_write_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during incremental backup of the source.",
+ },
+ },
+ },
+ },
+ "registered_source_max_concurrent_backups": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of backups Cohesity cluster will make concurrently to the registered entity. This limit is enforced only when the flag enforceRegisteredSourceMaxBackups is set to true.",
+ },
+ "storage_array_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Configuration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "storage_array_snapshot_max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ "storage_array_snapshot_throttling_policies": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies throttling policies configured for individual volume/lun.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the volume id of the storage array snapshot config.",
+ },
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "max_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Snapshots Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshots": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ "max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "use_o_auth_for_exchange_online": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether OAuth should be used for authentication in case of Exchange Online.",
+ },
+ "use_vm_bios_uuid": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if registered vCenter is using BIOS UUID to track virtual machines.",
+ },
+ "user_messages": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the additional details encountered during registration. Though the registration may succeed, user messages imply the host environment requires some cleanup or fixing.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "username": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies username to access the target source.",
+ },
+ "vlan_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the VLAN configuration for Recovery.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "vlan": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the VLAN to use for mounting Cohesity's view on the remote host. If specified, Cohesity hostname or the IP address on this VLAN is used.",
+ },
+ "disable_vlan": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether to use the VIPs even when VLANs are configured on the Cluster. If configured, VLAN IP addresses are used by default. If VLANs are not configured, this flag is ignored. Set this flag to true to force using the partition VIPs when VLANs are configured on the Cluster.",
+ },
+ "interface_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the physical interface group name to use for mounting Cohesity's view on the remote host. If specified, Cohesity hostname or the IP address on this VLAN is used.",
+ },
+ },
+ },
+ },
+ "warning_messages": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies a list of warnings encountered during registration. Though the registration may succeed, warning messages imply the host environment requires some cleanup or fixing.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ },
+ },
+ },
+ "source_side_dedup_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether source side dedup is enabled or not.",
+ },
+ "status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the agent status. Specifies the status of the agent running on a physical source.",
+ },
+ "status_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies additional details about the agent status.",
+ },
+ "upgradability": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the upgradability of the agent running on the physical server. Specifies the upgradability of the agent running on the physical server.",
+ },
+ "upgrade_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the status of the upgrade of the agent on a physical server. Specifies the status of the upgrade of the agent on a physical server.",
+ },
+ "upgrade_status_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies detailed message about the agent upgrade failure. This field is not set for successful upgrade.",
+ },
+ "version": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the version of the Agent software.",
+ },
+ "vol_cbt_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "CBT version and service state info.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "file_version": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Subcomponent version. The interpretation of the version is based on operating system.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "build_ver": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ "major_ver": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ "minor_ver": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ "revision_num": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ },
+ },
+ },
+ "is_installed": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Indicates whether the cbt driver is installed.",
+ },
+ "reboot_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Indicates whether host is rebooted post VolCBT installation.",
+ },
+ "service_state": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Structure to Hold Service Status.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "state": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "cluster_source_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of cluster resource this source represents.",
+ },
+ "host_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the hostname.",
+ },
+ "host_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the environment type for the host.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies an id for an object that is unique across Cohesity Clusters. The id is composite of all the ids listed below.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "cluster_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Cohesity Cluster id where the object was created.",
+ },
+ "cluster_incarnation_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies an id for the Cohesity Cluster that is generated when a Cohesity Cluster is initially created.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies a unique id assigned to an object (such as a Job) by the Cohesity Cluster.",
+ },
+ },
+ },
+ },
+ "is_proxy_host": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the physical host is a proxy host.",
+ },
+ "memory_size_bytes": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the total memory on the host in bytes.",
+ },
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a human readable name of the Protection Source.",
+ },
+ "networking_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the struct containing information about network addresses configured on the given box. This is needed for dealing with Windows/Oracle Cluster resources that we discover and protect automatically.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "resource_vec": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "The list of resources on the system that are accessible by an IP address.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "endpoints": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "The endpoints by which the resource is accessible.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "fqdn": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The Fully Qualified Domain Name.",
+ },
+ "ipv4_addr": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The IPv4 address.",
+ },
+ "ipv6_addr": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The IPv6 address.",
+ },
+ },
+ },
+ },
+ "type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The type of the resource.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "num_processors": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the number of processors on the host.",
+ },
+ "os_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a human readable name of the OS of the Protection Source.",
+ },
+ "type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of managed Object in a Physical Protection Source. 'kGroup' indicates the EH container.",
+ },
+ "vcs_version": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies cluster version for VCS host.",
+ },
+ "volumes": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Array of Physical Volumes. Specifies the volumes available on the physical host. These fields are populated only for the kPhysicalHost type.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "device_path": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the path to the device that hosts the volume locally.",
+ },
+ "guid": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies an id for the Physical Volume.",
+ },
+ "is_boot_volume": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether the volume is boot volume.",
+ },
+ "is_extended_attributes_supported": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether this volume supports extended attributes (like ACLs) when performing file backups.",
+ },
+ "is_protected": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if a volume is protected by a Job.",
+ },
+ "is_shared_volume": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether the volume is shared volume.",
+ },
+ "label": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a volume label that can be used for displaying additional identifying information about a volume.",
+ },
+ "logical_size_bytes": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the logical size of the volume in bytes that is not reduced by change-block tracking, compression and deduplication.",
+ },
+ "mount_points": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the mount points where the volume is mounted, for example- 'C:', '/mnt/foo' etc.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "mount_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies mount type of volume e.g. nfs, autofs, ext4 etc.",
+ },
+ "network_path": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the full path to connect to the network attached volume. For example, (IP or hostname):/path/to/share for NFS volumes).",
+ },
+ "used_size_bytes": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the size used by the volume in bytes.",
+ },
+ },
+ },
+ },
+ "vsswriters": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies vss writer information about a Physical Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "is_writer_excluded": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "If true, the writer will be excluded by default.",
+ },
+ "writer_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the name of the writer.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
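+ // sql_protection_source: describes a single SQL Server instance or database discovered under this Protection Source.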
+ "sql_protection_source": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies an Object representing one SQL Server instance or database.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "is_available_for_vss_backup": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether the database is marked as available for backup according to the SQL Server VSS writer. This may be false if either the state of the databases is not online, or if the VSS writer is not online. This field is set only for type 'kDatabase'.",
+ },
+ "created_timestamp": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the time when the database was created. It is displayed in the timezone of the SQL server on which this database is running.",
+ },
+ "database_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the database name of the SQL Protection Source, if the type is database.",
+ },
+ "db_aag_entity_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the AAG entity id if the database is part of an AAG. This field is set only for type 'kDatabase'.",
+ },
+ "db_aag_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the name of the AAG if the database is part of an AAG. This field is set only for type 'kDatabase'.",
+ },
+ "db_compatibility_level": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the versions of SQL server that the database is compatible with.",
+ },
+ "db_file_groups": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the information about the set of file groups for this db on the host. This is only set if the type is kDatabase.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "db_files": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the last known information about the set of database files on the host. This field is set only for type 'kDatabase'.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "file_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the format type of the file that the SQL database stores the data in. 'kRows' refers to a data file, 'kLog' refers to a log file, 'kFileStream' refers to a directory containing FILESTREAM data, 'kNotSupportedType' is for information purposes only (not supported), 'kFullText' refers to a full-text catalog.",
+ },
+ "full_path": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the full path of the database file on the SQL host machine.",
+ },
+ "size_bytes": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the last known size of the database file.",
+ },
+ },
+ },
+ },
+ "db_owner_username": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the name of the database owner.",
+ },
+ "default_database_location": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the default path for data files for DBs in an instance.",
+ },
+ "default_log_location": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the default path for log files for DBs in an instance.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies a unique id for a SQL Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "created_date_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies a unique identifier generated from the date the database is created or renamed. Cohesity uses this identifier in combination with the databaseId to uniquely identify a database.",
+ },
+ "database_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies a unique id of the database but only for the life of the database. SQL Server may reuse database ids. Cohesity uses the createDateMsecs in combination with this databaseId to uniquely identify a database.",
+ },
+ "instance_id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies unique id for the SQL Server instance. This id does not change during the life of the instance.",
+ },
+ },
+ },
+ },
+ "is_encrypted": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether the database is TDE enabled.",
+ },
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the instance name of the SQL Protection Source.",
+ },
+ "owner_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the id of the container VM for the SQL Protection Source.",
+ },
+ "recovery_model": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the Recovery Model for the database in SQL environment. Only meaningful for the 'kDatabase' SQL Protection Source. Specifies the Recovery Model set for the Microsoft SQL Server. 'kSimpleRecoveryModel' indicates the Simple SQL Recovery Model which does not utilize log backups. 'kFullRecoveryModel' indicates the Full SQL Recovery Model which requires log backups and allows recovery to a single point in time. 'kBulkLoggedRecoveryModel' indicates the Bulk Logged SQL Recovery Model which requires log backups and allows high-performance bulk copy operations.",
+ },
+ "sql_server_db_state": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The state of the database as returned by SQL Server. Indicates the state of the database. The values correspond to the 'state' field in the system table sys.databases. See https://goo.gl/P66XqM. 'kOnline' indicates that database is in online state. 'kRestoring' indicates that database is in restore state. 'kRecovering' indicates that database is in recovery state. 'kRecoveryPending' indicates that database recovery is in pending state. 'kSuspect' indicates that primary filegroup is suspect and may be damaged. 'kEmergency' indicates that manually forced emergency state. 'kOffline' indicates that database is in offline state. 'kCopying' indicates that database is in copying state. 'kOfflineSecondary' indicates that secondary database is in offline state.",
+ },
+ "sql_server_instance_version": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Server Instance Version.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "build": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the build.",
+ },
+ "major_version": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the major version.",
+ },
+ "minor_version": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the minor version.",
+ },
+ "revision": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the revision.",
+ },
+ "version_string": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the version string.",
+ },
+ },
+ },
+ },
+ "type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of the managed Object in a SQL Protection Source. Examples of SQL Objects include 'kInstance' and 'kDatabase'. 'kInstance' indicates that SQL server instance is being protected. 'kDatabase' indicates that SQL server database is being protected. 'kAAG' indicates that SQL AAG (AlwaysOn Availability Group) is being protected. 'kAAGRootContainer' indicates that SQL AAG's root container is being protected. 'kRootContainer' indicates root container for SQL sources.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
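+ // registration_info: read-only details captured when this source was registered with the cluster (access info, authentication status, throttling, subnets, registered applications, etc.).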
+ "registration_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies information about a registered Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "access_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the parameters required to establish a connection with a particular environment.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "connection_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "ID of the Bifrost (HyX or Rigel) network realm (i.e. a connection) associated with the source.",
+ },
+ "connector_group_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Id of the connector group. Each connector group is a collection of Rigel/hyx. Each entity will be tagged with a connector group id.",
+ },
+ "endpoint": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies an IP address or URL of the environment (such as the IP address of the vCenter Server for a VMware environment).",
+ },
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the environment like VMware, SQL, where the Protection Source exists. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies a Unique id that is generated when the Source is registered. This is a convenience field that is used to maintain an index to different connection params.",
+ },
+ "version": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Version is updated each time the connector parameters are updated. This is used to discard older connector parameters.",
+ },
+ },
+ },
+ },
+ "allowed_ip_addresses": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of IP Addresses on the registered source to be exclusively allowed for doing any type of IO operations.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "authentication_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies an authentication error message. This indicates the given credentials are rejected and the registration of the source is not successful.",
+ },
+ "authentication_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the status of authenticating to the Protection Source when registering it with Cohesity Cluster.",
+ },
+ "blacklisted_ip_addresses": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "This field is deprecated. Use DeniedIpAddresses instead.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "denied_ip_addresses": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of IP Addresses on the registered source to be denied for doing any type of IO operations.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "environments": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies a list of application environments that are registered with this Protection Source such as 'kSQL'. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "is_db_authenticated": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the application entity is dbAuthenticated or not.",
+ },
+ "is_storage_array_snapshot_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if this source entity has enabled storage array snapshot or not.",
+ },
+ "link_vms_across_vcenter": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the VM linking feature is enabled for this vCenter. This means that VMs present in this vCenter which earlier belonged to some other vCenter (also registered on the same cluster) and were migrated will be linked during EH refresh. This will enable preserving snapshot chains for migrated VMs.",
+ },
+ "minimum_free_space_gb": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minimum free space in GiB expected to be available on the datastore where the virtual disks of the VM being backed up reside. If the amount of free space (in GiB) is lower than the value given by this field, backup will be aborted. Note that this field is applicable only to 'kVMware' type of environments.",
+ },
+ "minimum_free_space_percent": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minimum free space as a percentage of the space expected to be available on the datastore where the virtual disks of the VM being backed up reside. If the amount of free space (in percentage) is lower than the value given by this field, backup will be aborted. Note that this field is applicable only to 'kVMware' type of environments.",
+ },
+ "password": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies password of the username to access the target source.",
+ },
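+ // physical_params: parameters used to register application servers (e.g. 'kSQL', 'kExchange') running on a physical source, including credentials and source-side throttling.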
+ "physical_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the parameters required to register Application Servers running in a Protection Source specific to a physical adapter.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "applications": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the types of applications such as 'kSQL', 'kExchange', 'kAD' running on the Protection Source. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "password": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies password of the username to access the target source.",
+ },
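+ // throttling_config: source-side CPU and network throttling, each with a fixed threshold, a pattern type and optional day/time throttling windows.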
+ "throttling_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the source side throttling configuration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "cpu_throttling_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Configuration Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "fixed_threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Fixed baseline threshold for throttling. This is mandatory for any other throttling type than kNoThrottling.",
+ },
+ "pattern_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Type of the throttling pattern. 'kNoThrottling' indicates that throttling is not in force. 'kBaseThrottling' indicates a constant base level throttling. 'kFixed' indicates a constant base level throttling.",
+ },
+ "throttling_windows": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Window Parameters Definition.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day_time_window": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Window Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "end_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "start_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Throttling threshold applicable in the window.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "network_throttling_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Configuration Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "fixed_threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Fixed baseline threshold for throttling. This is mandatory for any other throttling type than kNoThrottling.",
+ },
+ "pattern_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Type of the throttling pattern. 'kNoThrottling' indicates that throttling is not in force. 'kBaseThrottling' indicates a constant base level throttling. 'kFixed' indicates a constant base level throttling.",
+ },
+ "throttling_windows": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Window Parameters Definition.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day_time_window": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Window Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "end_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "start_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Throttling threshold applicable in the window.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "username": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies username to access the target source.",
+ },
+ },
+ },
+ },
+ "progress_monitor_path": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Captures the current progress and pulse details w.r.t. either the registration or refresh.",
+ },
+ "refresh_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a message if there was any error encountered during the last rebuild of the Protection Source tree. If there was no error during the last rebuild, this field is reset.",
+ },
+ "refresh_time_usecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Unix epoch time (in microseconds) when the Protection Source tree was most recently fetched and built.",
+ },
+ "registered_apps_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies information of the applications registered on this protection source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "authentication_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies an authentication error message. This indicates the given credentials are rejected and the registration of the application is not successful.",
+ },
+ "authentication_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the status of authenticating to the Protection Source when registering this application with Cohesity Cluster. If the status is 'kFinished' and there is no error, registration is successful. Specifies the status of the authentication during the registration of a Protection Source. 'kPending' indicates the authentication is in progress. 'kScheduled' indicates the authentication is scheduled. 'kFinished' indicates the authentication is completed. 'kRefreshInProgress' indicates the refresh is in progress.",
+ },
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the application environment. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ },
+ "host_settings_check_results": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of check results internally performed to verify status of various services such as 'AgentRunning', 'SQLWriterRunning' etc.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "check_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of the check internally performed. Specifies the type of the host check performed internally. 'kIsAgentPortAccessible' indicates the check for agent port access. 'kIsAgentRunning' indicates the status for the Cohesity agent service. 'kIsSQLWriterRunning' indicates the status for SQLWriter service. 'kAreSQLInstancesRunning' indicates the run status for all the SQL instances in the host. 'kCheckServiceLoginsConfig' checks the privileges and sysadmin status of the logins used by the SQL instance services, Cohesity agent service and the SQLWriter service. 'kCheckSQLFCIVIP' checks whether the SQL FCI is registered with a valid VIP or FQDN. 'kCheckSQLDiskSpace' checks whether volumes containing SQL DBs have at least 10% free space.",
+ },
+ "result_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of the result returned after performing the internal host check. Specifies the type of the host check result performed internally. 'kPass' indicates that the respective check was successful. 'kFail' indicates that the respective check failed as some mandatory setting is not met 'kWarning' indicates that the respective check has warning as certain non-mandatory setting is not met.",
+ },
+ "user_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a descriptive message for failed/warning types.",
+ },
+ },
+ },
+ },
+ "refresh_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a message if there was any error encountered during the last rebuild of the application tree. If there was no error during the last rebuild, this field is reset.",
+ },
+ },
+ },
+ },
+ "registration_time_usecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Unix epoch time (in microseconds) when the Protection Source was registered.",
+ },
+ "subnets": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of subnets added during creation or update of a VMware source. Currently, this field will only be populated in case of VMware registration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "component": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Component that has reserved the subnet.",
+ },
+ "description": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Description of the subnet.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "ID of the subnet.",
+ },
+ "ip": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies either an IPv6 address or an IPv4 address.",
+ },
+ "netmask_bits": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the number of bits in the netmask.",
+ },
+ "netmask_ip4": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the netmask using an IP4 address. The netmask can only be set using netmaskIp4 if the IP address is an IPv4 address.",
+ },
+ "nfs_access": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies whether clients from this subnet can mount using NFS protocol. Protocol access level. 'kDisabled' indicates Protocol access level 'Disabled' 'kReadOnly' indicates Protocol access level 'ReadOnly' 'kReadWrite' indicates Protocol access level 'ReadWrite'.",
+ },
+ "nfs_all_squash": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether all clients from this subnet can map view with view_all_squash_uid/view_all_squash_gid configured in the view.",
+ },
+ "nfs_root_squash": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether clients from this subnet can mount as root on NFS.",
+ },
+ "s3_access": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies whether clients from this subnet can access using S3 protocol. Protocol access level. 'kDisabled' indicates Protocol access level 'Disabled' 'kReadOnly' indicates Protocol access level 'ReadOnly' 'kReadWrite' indicates Protocol access level 'ReadWrite'.",
+ },
+ "smb_access": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies whether clients from this subnet can mount using SMB protocol. Protocol access level. 'kDisabled' indicates Protocol access level 'Disabled' 'kReadOnly' indicates Protocol access level 'ReadOnly' 'kReadWrite' indicates Protocol access level 'ReadWrite'.",
+ },
+ "tenant_id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the unique id of the tenant.",
+ },
+ },
+ },
+ },
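+ // throttling_policy: cluster-side throttling limits for the registered source (latency thresholds, max concurrent streams/backups, storage array snapshot limits).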
+ "throttling_policy": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the throttling policy for a registered Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "enforce_max_streams": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether datastore streams are configured for all datastores that are part of the registered entity. If set to true, number of streams from Cohesity cluster to the registered entity will be limited to the value set for maxConcurrentStreams. If not set or set to false, there is no max limit for the number of concurrent streams.",
+ },
+ "enforce_registered_source_max_backups": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether no. of backups are configured for the registered entity. If set to true, number of backups made by Cohesity cluster in the registered entity will be limited to the value set for RegisteredSourceMaxConcurrentBackups. If not set or set to false, there is no max limit for the number of concurrent backups.",
+ },
+ "is_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Indicates whether read operations to the datastores, which are part of the registered Protection Source, are throttled.",
+ },
+ "latency_thresholds": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies latency thresholds that trigger throttling for all datastores found in the registered Protection Source or specific to one datastore.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "active_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, existing backup tasks using the datastore are throttled.",
+ },
+ "new_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, then new backup tasks using the datastore will not be started.",
+ },
+ },
+ },
+ },
+ "max_concurrent_streams": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of streams Cohesity cluster will make concurrently to the datastores of the registered entity. This limit is enforced only when the flag enforceMaxStreams is set to true.",
+ },
+ "nas_source_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the NAS specific source throttling parameters during source registration or during backup of the source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_parallel_metadata_fetch_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during full backup of the source.",
+ },
+ "max_parallel_metadata_fetch_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during incremental backup of the source.",
+ },
+ "max_parallel_read_write_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during full backup of the source.",
+ },
+ "max_parallel_read_write_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during incremental backup of the source.",
+ },
+ },
+ },
+ },
+ "registered_source_max_concurrent_backups": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of backups Cohesity cluster will make concurrently to the registered entity. This limit is enforced only when the flag enforceRegisteredSourceMaxBackups is set to true.",
+ },
+ "storage_array_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Configuration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "storage_array_snapshot_max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max percentage of space that storage array snapshots are allowed to consume per volume/lun.",
+ },
+ },
+ },
+ },
+ "storage_array_snapshot_throttling_policies": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies throttling policies configured for individual volume/lun.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the volume id of the storage array snapshot config.",
+ },
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "max_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Snapshots Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshots": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ "max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max percentage of space that storage array snapshots are allowed to consume per volume/lun.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
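+ // throttling_policy_overrides: per-datastore overrides that mirror the throttling_policy block above.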
+ "throttling_policy_overrides": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies throttling policy override for a Datastore in a registered entity.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "datastore_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Protection Source id of the Datastore.",
+ },
+ "datastore_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the display name of the Datastore.",
+ },
+ "throttling_policy": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the throttling policy for a registered Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "enforce_max_streams": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether datastore streams are configured for all datastores that are part of the registered entity. If set to true, number of streams from Cohesity cluster to the registered entity will be limited to the value set for maxConcurrentStreams. If not set or set to false, there is no max limit for the number of concurrent streams.",
+ },
+ "enforce_registered_source_max_backups": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether no. of backups are configured for the registered entity. If set to true, number of backups made by Cohesity cluster in the registered entity will be limited to the value set for RegisteredSourceMaxConcurrentBackups. If not set or set to false, there is no max limit for the number of concurrent backups.",
+ },
+ "is_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Indicates whether read operations to the datastores, which are part of the registered Protection Source, are throttled.",
+ },
+ "latency_thresholds": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies latency thresholds that trigger throttling for all datastores found in the registered Protection Source or specific to one datastore.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "active_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, existing backup tasks using the datastore are throttled.",
+ },
+ "new_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, then new backup tasks using the datastore will not be started.",
+ },
+ },
+ },
+ },
+ "max_concurrent_streams": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of streams Cohesity cluster will make concurrently to the datastores of the registered entity. This limit is enforced only when the flag enforceMaxStreams is set to true.",
+ },
+ "nas_source_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the NAS specific source throttling parameters during source registration or during backup of the source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_parallel_metadata_fetch_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during full backup of the source.",
+ },
+ "max_parallel_metadata_fetch_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during incremental backup of the source.",
+ },
+ "max_parallel_read_write_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during full backup of the source.",
+ },
+ "max_parallel_read_write_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during incremental backup of the source.",
+ },
+ },
+ },
+ },
+ "registered_source_max_concurrent_backups": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of backups Cohesity cluster will make concurrently to the registered entity. This limit is enforced only when the flag enforceRegisteredSourceMaxBackups is set to true.",
+ },
+ "storage_array_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Configuration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "storage_array_snapshot_max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max percentage of space that storage array snapshots are allowed to consume per volume/lun.",
+ },
+ },
+ },
+ },
+ "storage_array_snapshot_throttling_policies": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies throttling policies configured for individual volume/lun.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the volume id of the storage array snapshot config.",
+ },
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "max_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Snapshots Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshots": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ "max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max percentage of space that storage array snapshots are allowed to consume per volume/lun.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "use_o_auth_for_exchange_online": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether OAuth should be used for authentication in case of Exchange Online.",
+ },
+ "use_vm_bios_uuid": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if registered vCenter is using BIOS UUID to track virtual machines.",
+ },
+ "user_messages": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the additional details encountered during registration. Though the registration may succeed, user messages imply the host environment requires some cleanup or fixing.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "username": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies username to access the target source.",
+ },
+ "vlan_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the VLAN configuration for Recovery.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "vlan": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the VLAN to use for mounting Cohesity's view on the remote host. If specified, Cohesity hostname or the IP address on this VLAN is used.",
+ },
+ "disable_vlan": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether to use the VIPs even when VLANs are configured on the Cluster. If configured, VLAN IP addresses are used by default. If VLANs are not configured, this flag is ignored. Set this flag to true to force using the partition VIPs when VLANs are configured on the Cluster.",
+ },
+ "interface_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the physical interface group name to use for mounting Cohesity's view on the remote host. If specified, Cohesity hostname or the IP address on this VLAN is used.",
+ },
+ },
+ },
+ },
+ "warning_messages": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies a list of warnings encountered during registration. Though the registration may succeed, warning messages imply the host environment requires some cleanup or fixing.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ },
+ },
+ },
+ "total_downtiered_size_in_bytes": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the total bytes downtiered from the source so far.",
+ },
+ "total_uptiered_size_in_bytes": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the total bytes uptiered to the source so far.",
+ },
+ "unprotected_sources_summary": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Aggregated information about a node subtree.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the environment such as 'kSQL' or 'kVMware', where the Protection Source exists. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc. NOTE 'kPuppeteer' refers to Cohesity's Remote Adapter. 'kVMware' indicates the VMware Protection Source environment. 'kHyperV' indicates the HyperV Protection Source environment. 'kSQL' indicates the SQL Protection Source environment. 'kView' indicates the View Protection Source environment. 'kPuppeteer' indicates the Cohesity's Remote Adapter. 'kPhysical' indicates the physical Protection Source environment. 'kPure' indicates the Pure Storage Protection Source environment. 'kNimble' indicates the Nimble Storage Protection Source environment. 'kAzure' indicates the Microsoft's Azure Protection Source environment. 'kNetapp' indicates the Netapp Protection Source environment. 'kAgent' indicates the Agent Protection Source environment. 'kGenericNas' indicates the Generic Network Attached Storage Protection Source environment. 'kAcropolis' indicates the Acropolis Protection Source environment. 'kPhysicalFiles' indicates the Physical Files Protection Source environment. 'kIbmFlashSystem' indicates the IBM Flash System Protection Source environment. 'kIsilon' indicates the Dell EMC's Isilon Protection Source environment. 'kGPFS' indicates IBM's GPFS Protection Source environment. 'kKVM' indicates the KVM Protection Source environment. 'kAWS' indicates the AWS Protection Source environment. 'kExchange' indicates the Exchange Protection Source environment. 'kHyperVVSS' indicates the HyperV VSS Protection Source environment. 'kOracle' indicates the Oracle Protection Source environment. 'kGCP' indicates the Google Cloud Platform Protection Source environment. 'kFlashBlade' indicates the Flash Blade Protection Source environment. 'kAWSNative' indicates the AWS Native Protection Source environment. 'kO365' indicates the Office 365 Protection Source environment. 'kO365Outlook' indicates Office 365 outlook Protection Source environment. 'kHyperFlex' indicates the Hyper Flex Protection Source environment. 'kGCPNative' indicates the GCP Native Protection Source environment. 'kAzureNative' indicates the Azure Native Protection Source environment. 'kKubernetes' indicates a Kubernetes Protection Source environment. 'kElastifile' indicates Elastifile Protection Source environment. 'kAD' indicates Active Directory Protection Source environment. 'kRDSSnapshotManager' indicates AWS RDS Protection Source environment. 'kCassandra' indicates Cassandra Protection Source environment. 'kMongoDB' indicates MongoDB Protection Source environment. 'kCouchbase' indicates Couchbase Protection Source environment. 'kHdfs' indicates Hdfs Protection Source environment. 'kHive' indicates Hive Protection Source environment. 'kHBase' indicates HBase Protection Source environment. 'kUDA' indicates Universal Data Adapter Protection Source environment. 'kO365Teams' indicates the Office365 Teams Protection Source environment. 'kO365Group' indicates the Office365 Groups Protection Source environment. 'kO365Exchange' indicates the Office365 Mailbox Protection Source environment. 'kO365OneDrive' indicates the Office365 OneDrive Protection Source environment. 'kO365Sharepoint' indicates the Office365 SharePoint Protection Source environment. 'kO365PublicFolders' indicates the Office365 PublicFolders Protection Source environment.",
+ },
+ "leaves_count": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the number of leaf nodes under the subtree of this node.",
+ },
+ "total_logical_size": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the total logical size of the data under the subtree of this node.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
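+ // entity_pagination_parameters: cursor based pagination for the children of a node in the source hierarchy.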
+ "entity_pagination_parameters": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the cursor based pagination parameters for Protection Source and its children. Pagination is supported at a given level within the Protection Source Hierarchy with the help of before or after cursors. A Cursor will always refer to a specific source within the source dataset but will be invalidated if the item is removed.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "after_cursor_entity_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the entity id starting from which the items are to be returned.",
+ },
+ "before_cursor_entity_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the entity id up to which the items are to be returned.",
+ },
+ "node_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the entity id for the Node at any level within the Source entity hierarchy whose children are to be paginated.",
+ },
+ "page_size": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the maximum number of entities to be returned within the page.",
+ },
+ },
+ },
+ },
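+ // entity_permission_info: users, groups and tenant that have permissions on this entity.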
+ "entity_permission_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the permission information of entities.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "entity_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the entity id.",
+ },
+ "groups": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies struct with basic group details.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "domain": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies domain name of the user.",
+ },
+ "group_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies group name of the group.",
+ },
+ "sid": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies unique Security ID (SID) of the user.",
+ },
+ "tenant_ids": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the tenants to which the group belongs.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ },
+ },
+ },
+ "is_inferred": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether the Entity Permission Information is inferred or not. For example, SQL application hosted over vCenter will have inferred entity permission information.",
+ },
+ "is_registered_by_sp": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether this entity is registered by the SP or not. This will be populated only if the entity is a root entity. Refer to magneto/base/permissions.proto for details.",
+ },
+ "registering_tenant_id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the tenant id that registered this entity. This will be populated only if the entity is a root entity.",
+ },
+ "tenant": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies struct with basic tenant details.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "bifrost_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if this tenant is bifrost enabled or not.",
+ },
+ "is_managed_on_helios": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether this tenant is managed on Helios.",
+ },
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies name of the tenant.",
+ },
+ "tenant_id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the unique id of the tenant.",
+ },
+ },
+ },
+ },
+ "users": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies struct with basic user details.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "domain": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies domain name of the user.",
+ },
+ "sid": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies unique Security ID (SID) of the user.",
+ },
+ "tenant_id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the tenant to which the user belongs.",
+ },
+ "user_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies user name of the user.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "logical_size": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the logical size of the data in bytes for the Object on this node. Presence of this field indicates this node is a leaf node.",
+ },
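+ // object_protection_info: object protection state for this source (auto-protect parent id, entity id, active object protection spec).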
+ "object_protection_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Object Protection Info of the Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "auto_protect_parent_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the auto protect parent id if this entity is protected based on auto protection. This is only specified for leaf entities.",
+ },
+ "entity_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the entity id.",
+ },
+ "has_active_object_protection_spec": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies if the entity is under object protection.",
+ },
+ },
+ },
+ },
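+ // protected_sources_summary: per-environment aggregates for child objects currently protected by a Protection Job.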
+ "protected_sources_summary": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Array of Protected Objects. Specifies aggregated information about all the child Objects of this node that are currently protected by a Protection Job. There is one entry for each environment that is being backed up. The aggregated information for the Object hierarchy's environment will be available at the 0th index of the vector.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the environment such as 'kSQL' or 'kVMware', where the Protection Source exists. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc. NOTE 'kPuppeteer' refers to Cohesity's Remote Adapter. 'kVMware' indicates the VMware Protection Source environment. 'kHyperV' indicates the HyperV Protection Source environment. 'kSQL' indicates the SQL Protection Source environment. 'kView' indicates the View Protection Source environment. 'kPuppeteer' indicates the Cohesity's Remote Adapter. 'kPhysical' indicates the physical Protection Source environment. 'kPure' indicates the Pure Storage Protection Source environment. 'kNimble' indicates the Nimble Storage Protection Source environment. 'kAzure' indicates the Microsoft's Azure Protection Source environment. 'kNetapp' indicates the Netapp Protection Source environment. 'kAgent' indicates the Agent Protection Source environment. 'kGenericNas' indicates the Generic Network Attached Storage Protection Source environment. 'kAcropolis' indicates the Acropolis Protection Source environment. 'kPhysicalFiles' indicates the Physical Files Protection Source environment. 'kIbmFlashSystem' indicates the IBM Flash System Protection Source environment. 'kIsilon' indicates the Dell EMC's Isilon Protection Source environment. 'kGPFS' indicates IBM's GPFS Protection Source environment. 'kKVM' indicates the KVM Protection Source environment. 'kAWS' indicates the AWS Protection Source environment. 'kExchange' indicates the Exchange Protection Source environment. 'kHyperVVSS' indicates the HyperV VSS Protection Source environment. 'kOracle' indicates the Oracle Protection Source environment. 'kGCP' indicates the Google Cloud Platform Protection Source environment. 'kFlashBlade' indicates the Flash Blade Protection Source environment. 'kAWSNative' indicates the AWS Native Protection Source environment. 'kO365' indicates the Office 365 Protection Source environment. 'kO365Outlook' indicates Office 365 outlook Protection Source environment. 'kHyperFlex' indicates the Hyper Flex Protection Source environment. 'kGCPNative' indicates the GCP Native Protection Source environment. 'kAzureNative' indicates the Azure Native Protection Source environment. 'kKubernetes' indicates a Kubernetes Protection Source environment. 'kElastifile' indicates Elastifile Protection Source environment. 'kAD' indicates Active Directory Protection Source environment. 'kRDSSnapshotManager' indicates AWS RDS Protection Source environment. 'kCassandra' indicates Cassandra Protection Source environment. 'kMongoDB' indicates MongoDB Protection Source environment. 'kCouchbase' indicates Couchbase Protection Source environment. 'kHdfs' indicates Hdfs Protection Source environment. 'kHive' indicates Hive Protection Source environment. 'kHBase' indicates HBase Protection Source environment. 'kUDA' indicates Universal Data Adapter Protection Source environment. 'kO365Teams' indicates the Office365 Teams Protection Source environment. 'kO365Group' indicates the Office365 Groups Protection Source environment. 'kO365Exchange' indicates the Office365 Mailbox Protection Source environment. 'kO365OneDrive' indicates the Office365 OneDrive Protection Source environment. 'kO365Sharepoint' indicates the Office365 SharePoint Protection Source environment. 'kO365PublicFolders' indicates the Office365 PublicFolders Protection Source environment.",
+ },
+ "leaves_count": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the number of leaf nodes under the subtree of this node.",
+ },
+ "total_logical_size": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the total logical size of the data under the subtree of this node.",
+ },
+ },
+ },
+ },
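+ // Protection Source node: common identifiers (environment, id, name, parent_id) plus the environment-specific physical and SQL sub-blocks defined below.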
+ "protection_source": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies details about an Acropolis Protection Source when the environment is set to 'kAcropolis'.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "connection_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the connection id of the tenant.",
+ },
+ "connector_group_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the connector group id of the connector groups.",
+ },
+ "custom_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the user provided custom name of the Protection Source.",
+ },
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the environment (such as 'kVMware' or 'kSQL') where the Protection Source exists. Depending on the environment, one of the following Protection Sources is initialized.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies an id of the Protection Source.",
+ },
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a name of the Protection Source.",
+ },
+ "parent_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies an id of the parent of the Protection Source.",
+ },
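+ // Physical-environment details for the source host, including the backup agents deployed on it.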
+ "physical_protection_source": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies a Protection Source in a Physical environment.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "agents": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the agents running on the Physical Protection Source and their status information.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "cbmr_version": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the version of the Cristie BMR product if it is installed on the host.",
+ },
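+ // File-level CBT driver version and service state reported by the agent.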
+ "file_cbt_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "CBT version and service state info.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "file_version": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Subcomponent version. The interpretation of the version is based on operating system.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "build_ver": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ "major_ver": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ "minor_ver": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ "revision_num": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ },
+ },
+ },
+ "is_installed": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Indicates whether the cbt driver is installed.",
+ },
+ "reboot_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Indicates whether the host was rebooted after VolCBT installation.",
+ },
+ "service_state": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Structure to Hold Service Status.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "state": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "host_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the host type where the agent is running. This is only set for persistent agents.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the agent's id.",
+ },
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the agent's name.",
+ },
+ "oracle_multi_node_channel_supported": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether Oracle multi-node multi-channel is supported or not.",
+ },
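+ // Registration details reported for this agent: connection/access info, credentials, subnets, and throttling policy.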
+ "registration_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies information about a registered Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "access_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the parameters required to establish a connection with a particular environment.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "connection_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "ID of the Bifrost (HyX or Rigel) network realm (i.e. a connection) associated with the source.",
+ },
+ "connector_group_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Id of the connector group. Each connector group is a collection of Rigel/hyx. Each entity will be tagged with a connector group id.",
+ },
+ "endpoint": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies an IP address or URL of the environment (such as the IP address of the vCenter Server for a VMware environment).",
+ },
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the environment, such as VMware or SQL, where the Protection Source exists. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies a Unique id that is generated when the Source is registered. This is a convenience field that is used to maintain an index to different connection params.",
+ },
+ "version": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Version is updated each time the connector parameters are updated. This is used to discard older connector parameters.",
+ },
+ },
+ },
+ },
+ "allowed_ip_addresses": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of IP Addresses on the registered source to be exclusively allowed for doing any type of IO operations.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "authentication_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies an authentication error message. This indicates the given credentials are rejected and the registration of the source is not successful.",
+ },
+ "authentication_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the status of authenticating to the Protection Source when registering it with the Cohesity Cluster.",
+ },
+ "blacklisted_ip_addresses": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "This field is deprecated. Use DeniedIpAddresses instead.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "denied_ip_addresses": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of IP Addresses on the registered source to be denied for doing any type of IO operations.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "environments": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Description: "Return only Protection Sources that match the passed in environment type such as 'kVMware', 'kSQL', 'kView' 'kPhysical', 'kPuppeteer', 'kPure', 'kNetapp', 'kGenericNas', 'kHyperV', 'kAcropolis', or 'kAzure'. For example, set this parameter to 'kVMware' to only return the Sources (and their Object subtrees) found in the 'kVMware' (VMware vCenter Server) environment.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "is_db_authenticated": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether the application entity is DB authenticated or not.",
+ },
+ "is_storage_array_snapshot_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether this source entity has storage array snapshot enabled or not.",
+ },
+ "link_vms_across_vcenter": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the VM linking feature is enabled for this vCenter. This means that VMs present in this vCenter which earlier belonged to some other vCenter (also registered on the same cluster) and were migrated will be linked during EH refresh. This enables preserving snapshot chains for migrated VMs.",
+ },
+ "minimum_free_space_gb": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minimum free space in GiB expected to be available on the datastore where the virtual disks of the VM being backed up reside. If the amount of free space (in GiB) is lower than the value given by this field, the backup is aborted. Note that this field is applicable only to 'kVMware' type of environments.",
+ },
+ "minimum_free_space_percent": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minimum free space, as a percentage, expected to be available on the datastore where the virtual disks of the VM being backed up reside. If the amount of free space (in percentage) is lower than the value given by this field, the backup is aborted. Note that this field is applicable only to 'kVMware' type of environments.",
+ },
+ "password": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the password of the username used to access the target source.",
+ },
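+ // Parameters used when registering application servers (such as SQL) running on the physical source, including the source-side throttling configuration.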
+ "physical_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the parameters required to register Application Servers running in a Protection Source specific to a physical adapter.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "applications": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the types of applications such as 'kSQL', 'kExchange', 'kAD' running on the Protection Source. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "password": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the password of the username used to access the target source.",
+ },
+ "throttling_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the source side throttling configuration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "cpu_throttling_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Configuration Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "fixed_threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Fixed baseline threshold for throttling. This is mandatory for any throttling type other than kNoThrottling.",
+ },
+ "pattern_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Type of the throttling pattern. 'kNoThrottling' indicates that throttling is not in force. 'kBaseThrottling' indicates a constant base level throttling. 'kFixed' indicates a constant base level throttling.",
+ },
+ "throttling_windows": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Window Parameters Definition.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day_time_window": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Window Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "end_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "start_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Throttling threshold applicable in the window.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "network_throttling_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Configuration Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "fixed_threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Fixed baseline threshold for throttling. This is mandatory for any throttling type other than kNoThrottling.",
+ },
+ "pattern_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Type of the throttling pattern. 'kNoThrottling' indicates that throttling is not in force. 'kBaseThrottling' indicates a constant base level throttling. 'kFixed' indicates a constant base level throttling.",
+ },
+ "throttling_windows": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Window Parameters Definition.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day_time_window": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Window Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "end_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "start_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Throttling threshold applicable in the window.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "username": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the username used to access the target source.",
+ },
+ },
+ },
+ },
+ "progress_monitor_path": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Captures the current progress and pulse details with respect to either the registration or refresh.",
+ },
+ "refresh_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a message if there was any error encountered during the last rebuild of the Protection Source tree. If there was no error during the last rebuild, this field is reset.",
+ },
+ "refresh_time_usecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Unix epoch time (in microseconds) when the Protection Source tree was most recently fetched and built.",
+ },
+ "registered_apps_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies information of the applications registered on this protection source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "authentication_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies an authentication error message. This indicates the given credentials are rejected and the registration of the application is not successful.",
+ },
+ "authentication_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the status of authenticating to the Protection Source when registering this application with Cohesity Cluster. If the status is 'kFinished' and there is no error, registration is successful. Specifies the status of the authentication during the registration of a Protection Source. 'kPending' indicates the authentication is in progress. 'kScheduled' indicates the authentication is scheduled. 'kFinished' indicates the authentication is completed. 'kRefreshInProgress' indicates the refresh is in progress.",
+ },
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the application environment. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ },
+ "host_settings_check_results": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of check results internally performed to verify status of various services such as 'AgentRunning', 'SQLWriterRunning' etc.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "check_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of the check internally performed. Specifies the type of the host check performed internally. 'kIsAgentPortAccessible' indicates the check for agent port access. 'kIsAgentRunning' indicates the status for the Cohesity agent service. 'kIsSQLWriterRunning' indicates the status for SQLWriter service. 'kAreSQLInstancesRunning' indicates the run status for all the SQL instances in the host. 'kCheckServiceLoginsConfig' checks the privileges and sysadmin status of the logins used by the SQL instance services, Cohesity agent service and the SQLWriter service. 'kCheckSQLFCIVIP' checks whether the SQL FCI is registered with a valid VIP or FQDN. 'kCheckSQLDiskSpace' checks whether volumes containing SQL DBs have at least 10% free space.",
+ },
+ "result_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of the result returned after performing the internal host check. Specifies the type of the host check result performed internally. 'kPass' indicates that the respective check was successful. 'kFail' indicates that the respective check failed as some mandatory setting is not met. 'kWarning' indicates that the respective check has a warning as a certain non-mandatory setting is not met.",
+ },
+ "user_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a descriptive message for failed/warning types.",
+ },
+ },
+ },
+ },
+ "refresh_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a message if there was any error encountered during the last rebuild of the application tree. If there was no error during the last rebuild, this field is reset.",
+ },
+ },
+ },
+ },
+ "registration_time_usecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Unix epoch time (in microseconds) when the Protection Source was registered.",
+ },
+ "subnets": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of subnets added during creation or update of the VMware source. Currently, this field is only populated in case of VMware registration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "component": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Component that has reserved the subnet.",
+ },
+ "description": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Description of the subnet.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "ID of the subnet.",
+ },
+ "ip": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies either an IPv6 address or an IPv4 address.",
+ },
+ "netmask_bits": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the netmask in bits.",
+ },
+ "netmask_ip4": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the netmask using an IP4 address. The netmask can only be set using netmaskIp4 if the IP address is an IPv4 address.",
+ },
+ "nfs_access": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies whether clients from this subnet can mount using NFS protocol. Protocol access level. 'kDisabled' indicates Protocol access level 'Disabled' 'kReadOnly' indicates Protocol access level 'ReadOnly' 'kReadWrite' indicates Protocol access level 'ReadWrite'.",
+ },
+ "nfs_all_squash": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether all clients from this subnet can map view with view_all_squash_uid/view_all_squash_gid configured in the view.",
+ },
+ "nfs_root_squash": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether clients from this subnet can mount as root on NFS.",
+ },
+ "s3_access": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies whether clients from this subnet can access using S3 protocol. Protocol access level. 'kDisabled' indicates Protocol access level 'Disabled' 'kReadOnly' indicates Protocol access level 'ReadOnly' 'kReadWrite' indicates Protocol access level 'ReadWrite'.",
+ },
+ "smb_access": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies whether clients from this subnet can mount using SMB protocol. Protocol access level. 'kDisabled' indicates Protocol access level 'Disabled' 'kReadOnly' indicates Protocol access level 'ReadOnly' 'kReadWrite' indicates Protocol access level 'ReadWrite'.",
+ },
+ "tenant_id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the unique id of the tenant.",
+ },
+ },
+ },
+ },
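+ // Source-wide throttling policy for the registered source: stream and backup limits, latency thresholds, NAS parameters, and storage array snapshot limits.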
+ "throttling_policy": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the throttling policy for a registered Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "enforce_max_streams": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether datastore streams are configured for all datastores that are part of the registered entity. If set to true, number of streams from Cohesity cluster to the registered entity will be limited to the value set for maxConcurrentStreams. If not set or set to false, there is no max limit for the number of concurrent streams.",
+ },
+ "enforce_registered_source_max_backups": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether no. of backups are configured for the registered entity. If set to true, number of backups made by Cohesity cluster in the registered entity will be limited to the value set for RegisteredSourceMaxConcurrentBackups. If not set or set to false, there is no max limit for the number of concurrent backups.",
+ },
+ "is_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Indicates whether read operations to the datastores, which are part of the registered Protection Source, are throttled.",
+ },
+ "latency_thresholds": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies latency thresholds that trigger throttling for all datastores found in the registered Protection Source or specific to one datastore.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "active_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, existing backup tasks using the datastore are throttled.",
+ },
+ "new_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, then new backup tasks using the datastore will not be started.",
+ },
+ },
+ },
+ },
+ "max_concurrent_streams": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of streams Cohesity cluster will make concurrently to the datastores of the registered entity. This limit is enforced only when the flag enforceMaxStreams is set to true.",
+ },
+ "nas_source_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the NAS specific source throttling parameters during source registration or during backup of the source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_parallel_metadata_fetch_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during full backup of the source.",
+ },
+ "max_parallel_metadata_fetch_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during incremental backup of the source.",
+ },
+ "max_parallel_read_write_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during full backup of the source.",
+ },
+ "max_parallel_read_write_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during incremental backup of the source.",
+ },
+ },
+ },
+ },
+ "registered_source_max_concurrent_backups": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of backups Cohesity cluster will make concurrently to the registered entity. This limit is enforced only when the flag enforceRegisteredSourceMaxBackups is set to true.",
+ },
+ "storage_array_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Configuration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "storage_array_snapshot_max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max percentage of space that storage array snapshots are allowed to use per volume/lun.",
+ },
+ },
+ },
+ },
+ "storage_array_snapshot_throttling_policies": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies throttling policies configured for individual volume/lun.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the volume id of the storage array snapshot config.",
+ },
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "max_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Snapshots Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshots": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ "max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max percentage of space that storage array snapshots are allowed to use per volume/lun.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
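+ // Per-datastore overrides of the source-wide throttling policy above; each entry names a datastore and repeats the throttling policy shape.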
+ "throttling_policy_overrides": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies throttling policy override for a Datastore in a registered entity.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "datastore_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Protection Source id of the Datastore.",
+ },
+ "datastore_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the display name of the Datastore.",
+ },
+ "throttling_policy": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the throttling policy for a registered Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "enforce_max_streams": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether datastore streams are configured for all datastores that are part of the registered entity. If set to true, number of streams from Cohesity cluster to the registered entity will be limited to the value set for maxConcurrentStreams. If not set or set to false, there is no max limit for the number of concurrent streams.",
+ },
+ "enforce_registered_source_max_backups": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether no. of backups are configured for the registered entity. If set to true, number of backups made by Cohesity cluster in the registered entity will be limited to the value set for RegisteredSourceMaxConcurrentBackups. If not set or set to false, there is no max limit for the number of concurrent backups.",
+ },
+ "is_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Indicates whether read operations to the datastores, which are part of the registered Protection Source, are throttled.",
+ },
+ "latency_thresholds": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies latency thresholds that trigger throttling for all datastores found in the registered Protection Source or specific to one datastore.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "active_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, existing backup tasks using the datastore are throttled.",
+ },
+ "new_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, then new backup tasks using the datastore will not be started.",
+ },
+ },
+ },
+ },
+ "max_concurrent_streams": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of streams Cohesity cluster will make concurrently to the datastores of the registered entity. This limit is enforced only when the flag enforceMaxStreams is set to true.",
+ },
+ "nas_source_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the NAS specific source throttling parameters during source registration or during backup of the source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_parallel_metadata_fetch_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during full backup of the source.",
+ },
+ "max_parallel_metadata_fetch_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during incremental backup of the source.",
+ },
+ "max_parallel_read_write_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during full backup of the source.",
+ },
+ "max_parallel_read_write_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during incremental backup of the source.",
+ },
+ },
+ },
+ },
+ "registered_source_max_concurrent_backups": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of backups Cohesity cluster will make concurrently to the registered entity. This limit is enforced only when the flag enforceRegisteredSourceMaxBackups is set to true.",
+ },
+ "storage_array_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Configuration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "storage_array_snapshot_max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max percentage of space that storage array snapshots are allowed to use per volume/lun.",
+ },
+ },
+ },
+ },
+ "storage_array_snapshot_throttling_policies": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies throttling policies configured for individual volume/lun.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the volume id of the storage array snapshot config.",
+ },
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "max_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Snapshots Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshots": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ "max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max percentage of space that storage array snapshots are allowed to use per volume/lun.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "use_o_auth_for_exchange_online": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether OAuth should be used for authentication in case of Exchange Online.",
+ },
+ "use_vm_bios_uuid": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if registered vCenter is using BIOS UUID to track virtual machines.",
+ },
+ "user_messages": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the additional details encountered during registration. Though the registration may succeed, user messages imply the host environment requires some cleanup or fixing.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "username": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies username to access the target source.",
+ },
+ "vlan_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the VLAN configuration for Recovery.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "vlan": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the VLAN to use for mounting Cohesity's view on the remote host. If specified, Cohesity hostname or the IP address on this VLAN is used.",
+ },
+ "disable_vlan": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether to use the VIPs even when VLANs are configured on the Cluster. If configured, VLAN IP addresses are used by default. If VLANs are not configured, this flag is ignored. Set this flag to true to force using the partition VIPs when VLANs are configured on the Cluster.",
+ },
+ "interface_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the physical interface group name to use for mounting Cohesity's view on the remote host. If specified, Cohesity hostname or the IP address on this VLAN is used.",
+ },
+ },
+ },
+ },
+ "warning_messages": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies a list of warnings encountered during registration. Though the registration may succeed, warning messages imply the host environment requires some cleanup or fixing.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ },
+ },
+ },
+ "source_side_dedup_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether source side dedup is enabled or not.",
+ },
+ "status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the status of the agent running on a physical source.",
+ },
+ "status_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies additional details about the agent status.",
+ },
+ "upgradability": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the upgradability of the agent running on the physical server.",
+ },
+ "upgrade_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the status of the upgrade of the agent on a physical server.",
+ },
+ "upgrade_status_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies detailed message about the agent upgrade failure. This field is not set for successful upgrade.",
+ },
+ "version": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the version of the Agent software.",
+ },
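+ // Volume-level CBT driver version and service state; same shape as file_cbt_info above.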
+ "vol_cbt_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "CBT version and service state info.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "file_version": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Subcomponent version. The interpretation of the version is based on operating system.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "build_ver": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ "major_ver": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ "minor_ver": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ "revision_num": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ },
+ },
+ },
+ "is_installed": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Indicates whether the cbt driver is installed.",
+ },
+ "reboot_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Indicates whether the host was rebooted after VolCBT installation.",
+ },
+ "service_state": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Structure to Hold Service Status.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "state": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "cluster_source_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of cluster resource this source represents.",
+ },
+ "host_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the hostname.",
+ },
+ "host_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the environment type for the host.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies an id for an object that is unique across Cohesity Clusters. The id is a composite of all the ids listed below.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "cluster_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Cohesity Cluster id where the object was created.",
+ },
+ "cluster_incarnation_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies an id for the Cohesity Cluster that is generated when a Cohesity Cluster is initially created.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies a unique id assigned to an object (such as a Job) by the Cohesity Cluster.",
+ },
+ },
+ },
+ },
+ "is_proxy_host": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the physical host is a proxy host.",
+ },
+ "memory_size_bytes": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the total memory on the host in bytes.",
+ },
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a human readable name of the Protection Source.",
+ },
+ "networking_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the struct containing information about network addresses configured on the given box. This is needed for dealing with Windows/Oracle Cluster resources that we discover and protect automatically.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "resource_vec": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "The list of resources on the system that are accessible by an IP address.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "endpoints": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "The endpoints by which the resource is accessible.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "fqdn": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The Fully Qualified Domain Name.",
+ },
+ "ipv4_addr": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The IPv4 address.",
+ },
+ "ipv6_addr": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The IPv6 address.",
+ },
+ },
+ },
+ },
+ "type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The type of the resource.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "num_processors": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the number of processors on the host.",
+ },
+ "os_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a human readable name of the OS of the Protection Source.",
+ },
+ "type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of managed Object in a Physical Protection Source. 'kGroup' indicates the EH container.",
+ },
+ "vcs_version": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies cluster version for VCS host.",
+ },
+ "volumes": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Array of Physical Volumes. Specifies the volumes available on the physical host. These fields are populated only for the kPhysicalHost type.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "device_path": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the path to the device that hosts the volume locally.",
+ },
+ "guid": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies an id for the Physical Volume.",
+ },
+ "is_boot_volume": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether the volume is a boot volume.",
+ },
+ "is_extended_attributes_supported": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether this volume supports extended attributes (like ACLs) when performing file backups.",
+ },
+ "is_protected": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if a volume is protected by a Job.",
+ },
+ "is_shared_volume": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether the volume is a shared volume.",
+ },
+ "label": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a volume label that can be used for displaying additional identifying information about a volume.",
+ },
+ "logical_size_bytes": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the logical size of the volume in bytes that is not reduced by change-block tracking, compression and deduplication.",
+ },
+ "mount_points": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the mount points where the volume is mounted, for example 'C:', '/mnt/foo', etc.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "mount_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the mount type of the volume, e.g. nfs, autofs, ext4, etc.",
+ },
+ "network_path": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the full path to connect to the network attached volume. For example, (IP or hostname):/path/to/share for NFS volumes.",
+ },
+ "used_size_bytes": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the size used by the volume in bytes.",
+ },
+ },
+ },
+ },
+ "vsswriters": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies vss writer information about a Physical Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "is_writer_excluded": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "If true, the writer will be excluded by default.",
+ },
+ "writer_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the name of the writer.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
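+ // SQL-environment details for a SQL Server instance or database on this source.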
+ "sql_protection_source": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies an Object representing one SQL Server instance or database.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "is_available_for_vss_backup": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether the database is marked as available for backup according to the SQL Server VSS writer. This may be false if either the state of the databases is not online, or if the VSS writer is not online. This field is set only for type 'kDatabase'.",
+ },
+ "created_timestamp": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the time when the database was created. It is displayed in the timezone of the SQL server on which this database is running.",
+ },
+ "database_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the database name of the SQL Protection Source, if the type is database.",
+ },
+ "db_aag_entity_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the AAG entity id if the database is part of an AAG. This field is set only for type 'kDatabase'.",
+ },
+ "db_aag_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the name of the AAG if the database is part of an AAG. This field is set only for type 'kDatabase'.",
+ },
+ "db_compatibility_level": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the versions of SQL server that the database is compatible with.",
+ },
+ "db_file_groups": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the information about the set of file groups for this db on the host. This is only set if the type is kDatabase.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "db_files": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the last known information about the set of database files on the host. This field is set only for type 'kDatabase'.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "file_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the format type of the file that SQL database stores the data. Specifies the format type of the file that SQL database stores the data. 'kRows' refers to a data file 'kLog' refers to a log file 'kFileStream' refers to a directory containing FILESTREAM data 'kNotSupportedType' is for information purposes only. Not supported. 'kFullText' refers to a full-text catalog.",
+ },
+ "full_path": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the full path of the database file on the SQL host machine.",
+ },
+ "size_bytes": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the last known size of the database file.",
+ },
+ },
+ },
+ },
+ "db_owner_username": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the name of the database owner.",
+ },
+ "default_database_location": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the default path for data files for DBs in an instance.",
+ },
+ "default_log_location": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the default path for log files for DBs in an instance.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies a unique id for a SQL Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "created_date_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies a unique identifier generated from the date the database is created or renamed. Cohesity uses this identifier in combination with the databaseId to uniquely identify a database.",
+ },
+ "database_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies a unique id of the database but only for the life of the database. SQL Server may reuse database ids. Cohesity uses the createDateMsecs in combination with this databaseId to uniquely identify a database.",
+ },
+ "instance_id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies unique id for the SQL Server instance. This id does not change during the life of the instance.",
+ },
+ },
+ },
+ },
+ "is_encrypted": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether the database is TDE enabled.",
+ },
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the instance name of the SQL Protection Source.",
+ },
+ "owner_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the id of the container VM for the SQL Protection Source.",
+ },
+ "recovery_model": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the Recovery Model for the database in SQL environment. Only meaningful for the 'kDatabase' SQL Protection Source. Specifies the Recovery Model set for the Microsoft SQL Server. 'kSimpleRecoveryModel' indicates the Simple SQL Recovery Model which does not utilize log backups. 'kFullRecoveryModel' indicates the Full SQL Recovery Model which requires log backups and allows recovery to a single point in time. 'kBulkLoggedRecoveryModel' indicates the Bulk Logged SQL Recovery Model which requires log backups and allows high-performance bulk copy operations.",
+ },
+ "sql_server_db_state": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The state of the database as returned by SQL Server. Indicates the state of the database. The values correspond to the 'state' field in the system table sys.databases. See https://goo.gl/P66XqM. 'kOnline' indicates that database is in online state. 'kRestoring' indicates that database is in restore state. 'kRecovering' indicates that database is in recovery state. 'kRecoveryPending' indicates that database recovery is in pending state. 'kSuspect' indicates that primary filegroup is suspect and may be damaged. 'kEmergency' indicates that manually forced emergency state. 'kOffline' indicates that database is in offline state. 'kCopying' indicates that database is in copying state. 'kOfflineSecondary' indicates that secondary database is in offline state.",
+ },
+ "sql_server_instance_version": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Server Instance Version.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "build": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the build.",
+ },
+ "major_version": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the major version.",
+ },
+ "minor_version": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the minor version.",
+ },
+ "revision": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the revision.",
+ },
+ "version_string": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the version string.",
+ },
+ },
+ },
+ },
+ "type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of the managed Object in a SQL Protection Source. Examples of SQL Objects include 'kInstance' and 'kDatabase'. 'kInstance' indicates that SQL server instance is being protected. 'kDatabase' indicates that SQL server database is being protected. 'kAAG' indicates that SQL AAG (AlwaysOn Availability Group) is being protected. 'kAAGRootContainer' indicates that SQL AAG's root container is being protected. 'kRootContainer' indicates root container for SQL sources.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
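+ // registration_info: nested Computed block describing how the source was registered
+ // (connection/access info, allowed and denied IP lists, throttling configuration,
+ // subnets, registered applications, and VLAN parameters).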
+ "registration_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies information about a registered Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "access_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the parameters required to establish a connection with a particular environment.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "connection_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "ID of the Bifrost (HyX or Rigel) network realm (i.e. a connection) associated with the source.",
+ },
+ "connector_group_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Id of the connector group. Each connector group is collection of Rigel/hyx. Each entity will be tagged with connector group id.",
+ },
+ "endpoint": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specify an IP address or URL of the environment. (such as the IP address of the vCenter Server for a VMware environment).",
+ },
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the environment like VMware, SQL, where the Protection Source exists. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies a Unique id that is generated when the Source is registered. This is a convenience field that is used to maintain an index to different connection params.",
+ },
+ "version": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Version is updated each time the connector parameters are updated. This is used to discard older connector parameters.",
+ },
+ },
+ },
+ },
+ "allowed_ip_addresses": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of IP Addresses on the registered source to be exclusively allowed for doing any type of IO operations.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "authentication_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies an authentication error message. This indicates the given credentials are rejected and the registration of the source is not successful.",
+ },
+ "authentication_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the status of the authenticating to the Protection Source when registering it with Cohesity Cluster.",
+ },
+ "blacklisted_ip_addresses": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "This field is deprecated. Use DeniedIpAddresses instead.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "denied_ip_addresses": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of IP Addresses on the registered source to be denied for doing any type of IO operations.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "environments": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Description: "Return only Protection Sources that match the passed in environment type such as 'kVMware', 'kSQL', 'kView' 'kPhysical', 'kPuppeteer', 'kPure', 'kNetapp', 'kGenericNas', 'kHyperV', 'kAcropolis', or 'kAzure'. For example, set this parameter to 'kVMware' to only return the Sources (and their Object subtrees) found in the 'kVMware' (VMware vCenter Server) environment.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "is_db_authenticated": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if application entity dbAuthenticated or not.",
+ },
+ "is_storage_array_snapshot_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if this source entity has enabled storage array snapshot or not.",
+ },
+ "link_vms_across_vcenter": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the VM linking feature is enabled for this VCenter This means that VMs present in this VCenter which earlier belonged to some other VCenter(also registerd on same cluster) and were migrated, will be linked during EH refresh. This will enable preserving snapshot chains for migrated VMs.",
+ },
+ "minimum_free_space_gb": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minimum free space in GiB of the space expected to be available on the datastore where the virtual disks of the VM being backed up. If the amount of free space(in GiB) is lower than the value given by this field, backup will be aborted. Note that this field is applicable only to 'kVMware' type of environments.",
+ },
+ "minimum_free_space_percent": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minimum free space in percentage of the space expected to be available on the datastore where the virtual disks of the VM being backed up. If the amount of free space(in percentage) is lower than the value given by this field, backup will be aborted. Note that this field is applicable only to 'kVMware' type of environments.",
+ },
+ "password": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies password of the username to access the target source.",
+ },
+ "physical_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the parameters required to register Application Servers running in a Protection Source specific to a physical adapter.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "applications": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the types of applications such as 'kSQL', 'kExchange', 'kAD' running on the Protection Source. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "password": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies password of the username to access the target source.",
+ },
+ "throttling_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the source side throttling configuration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "cpu_throttling_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Configuration Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "fixed_threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Fixed baseline threshold for throttling. This is mandatory for any other throttling type than kNoThrottling.",
+ },
+ "pattern_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Type of the throttling pattern. 'kNoThrottling' indicates that throttling is not in force. 'kBaseThrottling' indicates indicates a constant base level throttling. 'kFixed' indicates a constant base level throttling.",
+ },
+ "throttling_windows": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Window Parameters Definition.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day_time_window": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Window Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "end_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "start_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Throttling threshold applicable in the window.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "network_throttling_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Configuration Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "fixed_threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Fixed baseline threshold for throttling. This is mandatory for any other throttling type than kNoThrottling.",
+ },
+ "pattern_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Type of the throttling pattern. 'kNoThrottling' indicates that throttling is not in force. 'kBaseThrottling' indicates indicates a constant base level throttling. 'kFixed' indicates a constant base level throttling.",
+ },
+ "throttling_windows": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Window Parameters Definition.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day_time_window": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Window Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "end_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "start_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Throttling threshold applicable in the window.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "username": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies username to access the target source.",
+ },
+ },
+ },
+ },
+ "progress_monitor_path": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Captures the current progress and pulse details w.r.t to either the registration or refresh.",
+ },
+ "refresh_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a message if there was any error encountered during the last rebuild of the Protection Source tree. If there was no error during the last rebuild, this field is reset.",
+ },
+ "refresh_time_usecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Unix epoch time (in microseconds) when the Protection Source tree was most recently fetched and built.",
+ },
+ "registered_apps_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies information of the applications registered on this protection source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "authentication_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "pecifies an authentication error message. This indicates the given credentials are rejected and the registration of the application is not successful.",
+ },
+ "authentication_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the status of authenticating to the Protection Source when registering this application with Cohesity Cluster. If the status is 'kFinished' and there is no error, registration is successful. Specifies the status of the authentication during the registration of a Protection Source. 'kPending' indicates the authentication is in progress. 'kScheduled' indicates the authentication is scheduled. 'kFinished' indicates the authentication is completed. 'kRefreshInProgress' indicates the refresh is in progress.",
+ },
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the application environment. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ },
+ "host_settings_check_results": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of check results internally performed to verify status of various services such as 'AgnetRunning', 'SQLWriterRunning' etc.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "check_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of the check internally performed. Specifies the type of the host check performed internally. 'kIsAgentPortAccessible' indicates the check for agent port access. 'kIsAgentRunning' indicates the status for the Cohesity agent service. 'kIsSQLWriterRunning' indicates the status for SQLWriter service. 'kAreSQLInstancesRunning' indicates the run status for all the SQL instances in the host. 'kCheckServiceLoginsConfig' checks the privileges and sysadmin status of the logins used by the SQL instance services, Cohesity agent service and the SQLWriter service. 'kCheckSQLFCIVIP' checks whether the SQL FCI is registered with a valid VIP or FQDN. 'kCheckSQLDiskSpace' checks whether volumes containing SQL DBs have at least 10% free space.",
+ },
+ "result_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of the result returned after performing the internal host check. Specifies the type of the host check result performed internally. 'kPass' indicates that the respective check was successful. 'kFail' indicates that the respective check failed as some mandatory setting is not met 'kWarning' indicates that the respective check has warning as certain non-mandatory setting is not met.",
+ },
+ "user_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a descriptive message for failed/warning types.",
+ },
+ },
+ },
+ },
+ "refresh_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a message if there was any error encountered during the last rebuild of the application tree. If there was no error during the last rebuild, this field is reset.",
+ },
+ },
+ },
+ },
+ "registration_time_usecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Unix epoch time (in microseconds) when the Protection Source was registered.",
+ },
+ "subnets": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of subnets added during creation or updation of vmare source. Currently, this field will only be populated in case of VMware registration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "component": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Component that has reserved the subnet.",
+ },
+ "description": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Description of the subnet.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "ID of the subnet.",
+ },
+ "ip": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies either an IPv6 address or an IPv4 address.",
+ },
+ "netmask_bits": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "netmaskBits.",
+ },
+ "netmask_ip4": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the netmask using an IP4 address. The netmask can only be set using netmaskIp4 if the IP address is an IPv4 address.",
+ },
+ "nfs_access": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Component that has reserved the subnet.",
+ },
+ "nfs_all_squash": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether all clients from this subnet can map view with view_all_squash_uid/view_all_squash_gid configured in the view.",
+ },
+ "nfs_root_squash": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether clients from this subnet can mount as root on NFS.",
+ },
+ "s3_access": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies whether clients from this subnet can access using S3 protocol. Protocol access level. 'kDisabled' indicates Protocol access level 'Disabled' 'kReadOnly' indicates Protocol access level 'ReadOnly' 'kReadWrite' indicates Protocol access level 'ReadWrite'.",
+ },
+ "smb_access": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies whether clients from this subnet can mount using SMB protocol. Protocol access level. 'kDisabled' indicates Protocol access level 'Disabled' 'kReadOnly' indicates Protocol access level 'ReadOnly' 'kReadWrite' indicates Protocol access level 'ReadWrite'.",
+ },
+ "tenant_id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the unique id of the tenant.",
+ },
+ },
+ },
+ },
+ "throttling_policy": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the throttling policy for a registered Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "enforce_max_streams": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether datastore streams are configured for all datastores that are part of the registered entity. If set to true, number of streams from Cohesity cluster to the registered entity will be limited to the value set for maxConcurrentStreams. If not set or set to false, there is no max limit for the number of concurrent streams.",
+ },
+ "enforce_registered_source_max_backups": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether no. of backups are configured for the registered entity. If set to true, number of backups made by Cohesity cluster in the registered entity will be limited to the value set for RegisteredSourceMaxConcurrentBackups. If not set or set to false, there is no max limit for the number of concurrent backups.",
+ },
+ "is_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Indicates whether read operations to the datastores, which are part of the registered Protection Source, are throttled.",
+ },
+ "latency_thresholds": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies latency thresholds that trigger throttling for all datastores found in the registered Protection Source or specific to one datastore.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "active_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, existing backup tasks using the datastore are throttled.",
+ },
+ "new_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, then new backup tasks using the datastore will not be started.",
+ },
+ },
+ },
+ },
+ "max_concurrent_streams": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of streams Cohesity cluster will make concurrently to the datastores of the registered entity. This limit is enforced only when the flag enforceMaxStreams is set to true.",
+ },
+ "nas_source_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the NAS specific source throttling parameters during source registration or during backup of the source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_parallel_metadata_fetch_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during full backup of the source.",
+ },
+ "max_parallel_metadata_fetch_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during incremental backup of the source.",
+ },
+ "max_parallel_read_write_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during full backup of the source.",
+ },
+ "max_parallel_read_write_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during incremental backup of the source.",
+ },
+ },
+ },
+ },
+ "registered_source_max_concurrent_backups": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of backups Cohesity cluster will make concurrently to the registered entity. This limit is enforced only when the flag enforceRegisteredSourceMaxBackups is set to true.",
+ },
+ "storage_array_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Configuration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "storage_array_snapshot_max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ "storage_array_snapshot_throttling_policies": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies throttling policies configured for individual volume/lun.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the volume id of the storage array snapshot config.",
+ },
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "max_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Snapshots Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshots": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ "max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
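+ // throttling_policy_overrides: per-datastore overrides of the throttling_policy block
+ // above; each entry carries the datastore id and name plus a full throttling_policy.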
+ "throttling_policy_overrides": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies throttling policy override for a Datastore in a registered entity.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "datastore_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Protection Source id of the Datastore.",
+ },
+ "datastore_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the display name of the Datastore.",
+ },
+ "throttling_policy": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the throttling policy for a registered Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "enforce_max_streams": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether datastore streams are configured for all datastores that are part of the registered entity. If set to true, number of streams from Cohesity cluster to the registered entity will be limited to the value set for maxConcurrentStreams. If not set or set to false, there is no max limit for the number of concurrent streams.",
+ },
+ "enforce_registered_source_max_backups": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether no. of backups are configured for the registered entity. If set to true, number of backups made by Cohesity cluster in the registered entity will be limited to the value set for RegisteredSourceMaxConcurrentBackups. If not set or set to false, there is no max limit for the number of concurrent backups.",
+ },
+ "is_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Indicates whether read operations to the datastores, which are part of the registered Protection Source, are throttled.",
+ },
+ "latency_thresholds": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies latency thresholds that trigger throttling for all datastores found in the registered Protection Source or specific to one datastore.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "active_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, existing backup tasks using the datastore are throttled.",
+ },
+ "new_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, then new backup tasks using the datastore will not be started.",
+ },
+ },
+ },
+ },
+ "max_concurrent_streams": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of streams Cohesity cluster will make concurrently to the datastores of the registered entity. This limit is enforced only when the flag enforceMaxStreams is set to true.",
+ },
+ "nas_source_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the NAS specific source throttling parameters during source registration or during backup of the source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_parallel_metadata_fetch_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during full backup of the source.",
+ },
+ "max_parallel_metadata_fetch_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during incremental backup of the source.",
+ },
+ "max_parallel_read_write_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during full backup of the source.",
+ },
+ "max_parallel_read_write_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during incremental backup of the source.",
+ },
+ },
+ },
+ },
+ "registered_source_max_concurrent_backups": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of backups Cohesity cluster will make concurrently to the registered entity. This limit is enforced only when the flag enforceRegisteredSourceMaxBackups is set to true.",
+ },
+ "storage_array_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Configuration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "storage_array_snapshot_max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ "storage_array_snapshot_throttling_policies": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies throttling policies configured for individual volume/lun.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the volume id of the storage array snapshot config.",
+ },
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "max_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Snapshots Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshots": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ "max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "use_o_auth_for_exchange_online": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether OAuth should be used for authentication in case of Exchange Online.",
+ },
+ "use_vm_bios_uuid": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if registered vCenter is using BIOS UUID to track virtual machines.",
+ },
+ "user_messages": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the additional details encountered during registration. Though the registration may succeed, user messages imply the host environment requires some cleanup or fixing.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "username": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies username to access the target source.",
+ },
+ "vlan_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the VLAN configuration for Recovery.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "vlan": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the VLAN to use for mounting Cohesity's view on the remote host. If specified, Cohesity hostname or the IP address on this VLAN is used.",
+ },
+ "disable_vlan": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether to use the VIPs even when VLANs are configured on the Cluster. If configured, VLAN IP addresses are used by default. If VLANs are not configured, this flag is ignored. Set this flag to true to force using the partition VIPs when VLANs are configured on the Cluster.",
+ },
+ "interface_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the physical interface group name to use for mounting Cohesity's view on the remote host. If specified, Cohesity hostname or the IP address on this VLAN is used.",
+ },
+ },
+ },
+ },
+ "warning_messages": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies a list of warnings encountered during registration. Though the registration may succeed, warning messages imply the host environment requires some cleanup or fixing.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ },
+ },
+ },
+ "total_downtiered_size_in_bytes": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the total bytes downtiered from the source so far.",
+ },
+ "total_uptiered_size_in_bytes": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the total bytes uptiered to the source so far.",
+ },
+ "unprotected_sources_summary": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Aggregated information about a node subtree.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the environment such as 'kSQL' or 'kVMware', where the Protection Source exists. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc. NOTE 'kPuppeteer' refers to Cohesity's Remote Adapter. 'kVMware' indicates the VMware Protection Source environment. 'kHyperV' indicates the HyperV Protection Source environment. 'kSQL' indicates the SQL Protection Source environment. 'kView' indicates the View Protection Source environment. 'kPuppeteer' indicates the Cohesity's Remote Adapter. 'kPhysical' indicates the physical Protection Source environment. 'kPure' indicates the Pure Storage Protection Source environment. 'kNimble' indicates the Nimble Storage Protection Source environment. 'kAzure' indicates the Microsoft's Azure Protection Source environment. 'kNetapp' indicates the Netapp Protection Source environment. 'kAgent' indicates the Agent Protection Source environment. 'kGenericNas' indicates the Generic Network Attached Storage Protection Source environment. 'kAcropolis' indicates the Acropolis Protection Source environment. 'kPhysicalFiles' indicates the Physical Files Protection Source environment. 'kIbmFlashSystem' indicates the IBM Flash System Protection Source environment. 'kIsilon' indicates the Dell EMC's Isilon Protection Source environment. 'kGPFS' indicates IBM's GPFS Protection Source environment. 'kKVM' indicates the KVM Protection Source environment. 'kAWS' indicates the AWS Protection Source environment. 'kExchange' indicates the Exchange Protection Source environment. 'kHyperVVSS' indicates the HyperV VSS Protection Source environment. 'kOracle' indicates the Oracle Protection Source environment. 'kGCP' indicates the Google Cloud Platform Protection Source environment. 'kFlashBlade' indicates the Flash Blade Protection Source environment. 'kAWSNative' indicates the AWS Native Protection Source environment. 'kO365' indicates the Office 365 Protection Source environment. 'kO365Outlook' indicates Office 365 outlook Protection Source environment. 'kHyperFlex' indicates the Hyper Flex Protection Source environment. 'kGCPNative' indicates the GCP Native Protection Source environment. 'kAzureNative' indicates the Azure Native Protection Source environment. 'kKubernetes' indicates a Kubernetes Protection Source environment. 'kElastifile' indicates Elastifile Protection Source environment. 'kAD' indicates Active Directory Protection Source environment. 'kRDSSnapshotManager' indicates AWS RDS Protection Source environment. 'kCassandra' indicates Cassandra Protection Source environment. 'kMongoDB' indicates MongoDB Protection Source environment. 'kCouchbase' indicates Couchbase Protection Source environment. 'kHdfs' indicates Hdfs Protection Source environment. 'kHive' indicates Hive Protection Source environment. 'kHBase' indicates HBase Protection Source environment. 'kUDA' indicates Universal Data Adapter Protection Source environment. 'kO365Teams' indicates the Office365 Teams Protection Source environment. 'kO365Group' indicates the Office365 Groups Protection Source environment. 'kO365Exchange' indicates the Office365 Mailbox Protection Source environment. 'kO365OneDrive' indicates the Office365 OneDrive Protection Source environment. 'kO365Sharepoint' indicates the Office365 SharePoint Protection Source environment. 'kO365PublicFolders' indicates the Office365 PublicFolders Protection Source environment.",
+ },
+ "leaves_count": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the number of leaf nodes under the subtree of this node.",
+ },
+ "total_logical_size": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the total logical size of the data under the subtree of this node.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "logical_size": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the logical size of the data in bytes for the Object on this node. Presence of this field indicates this node is a leaf node.",
+ },
+ "object_protection_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Object Protection Info of the Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "auto_protect_parent_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the auto protect parent id if this entity is protected based on auto protection. This is only specified for leaf entities.",
+ },
+ "entity_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the entity id.",
+ },
+ "has_active_object_protection_spec": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies if the entity is under object protection.",
+ },
+ },
+ },
+ },
+ "protected_sources_summary": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Array of Protected Objects. Specifies aggregated information about all the child Objects of this node that are currently protected by a Protection Job. There is one entry for each environment that is being backed up. The aggregated information for the Object hierarchy's environment will be available at the 0th index of the vector.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the environment such as 'kSQL' or 'kVMware', where the Protection Source exists. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc. NOTE 'kPuppeteer' refers to Cohesity's Remote Adapter. 'kVMware' indicates the VMware Protection Source environment. 'kHyperV' indicates the HyperV Protection Source environment. 'kSQL' indicates the SQL Protection Source environment. 'kView' indicates the View Protection Source environment. 'kPuppeteer' indicates the Cohesity's Remote Adapter. 'kPhysical' indicates the physical Protection Source environment. 'kPure' indicates the Pure Storage Protection Source environment. 'kNimble' indicates the Nimble Storage Protection Source environment. 'kAzure' indicates the Microsoft's Azure Protection Source environment. 'kNetapp' indicates the Netapp Protection Source environment. 'kAgent' indicates the Agent Protection Source environment. 'kGenericNas' indicates the Generic Network Attached Storage Protection Source environment. 'kAcropolis' indicates the Acropolis Protection Source environment. 'kPhysicalFiles' indicates the Physical Files Protection Source environment. 'kIbmFlashSystem' indicates the IBM Flash System Protection Source environment. 'kIsilon' indicates the Dell EMC's Isilon Protection Source environment. 'kGPFS' indicates IBM's GPFS Protection Source environment. 'kKVM' indicates the KVM Protection Source environment. 'kAWS' indicates the AWS Protection Source environment. 'kExchange' indicates the Exchange Protection Source environment. 'kHyperVVSS' indicates the HyperV VSS Protection Source environment. 'kOracle' indicates the Oracle Protection Source environment. 'kGCP' indicates the Google Cloud Platform Protection Source environment. 'kFlashBlade' indicates the Flash Blade Protection Source environment. 'kAWSNative' indicates the AWS Native Protection Source environment. 'kO365' indicates the Office 365 Protection Source environment. 'kO365Outlook' indicates Office 365 outlook Protection Source environment. 'kHyperFlex' indicates the Hyper Flex Protection Source environment. 'kGCPNative' indicates the GCP Native Protection Source environment. 'kAzureNative' indicates the Azure Native Protection Source environment. 'kKubernetes' indicates a Kubernetes Protection Source environment. 'kElastifile' indicates Elastifile Protection Source environment. 'kAD' indicates Active Directory Protection Source environment. 'kRDSSnapshotManager' indicates AWS RDS Protection Source environment. 'kCassandra' indicates Cassandra Protection Source environment. 'kMongoDB' indicates MongoDB Protection Source environment. 'kCouchbase' indicates Couchbase Protection Source environment. 'kHdfs' indicates Hdfs Protection Source environment. 'kHive' indicates Hive Protection Source environment. 'kHBase' indicates HBase Protection Source environment. 'kUDA' indicates Universal Data Adapter Protection Source environment. 'kO365Teams' indicates the Office365 Teams Protection Source environment. 'kO365Group' indicates the Office365 Groups Protection Source environment. 'kO365Exchange' indicates the Office365 Mailbox Protection Source environment. 'kO365OneDrive' indicates the Office365 OneDrive Protection Source environment. 'kO365Sharepoint' indicates the Office365 SharePoint Protection Source environment. 'kO365PublicFolders' indicates the Office365 PublicFolders Protection Source environment.",
+ },
+ "leaves_count": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the number of leaf nodes under the subtree of this node.",
+ },
+ "total_logical_size": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the total logical size of the data under the subtree of this node.",
+ },
+ },
+ },
+ },
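+ // protection_source: the node's own Protection Source details (id, name, parent,
+ // environment) plus environment-specific sub-blocks such as physical_protection_source.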
+ "protection_source": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies details about an Acropolis Protection Source when the environment is set to 'kAcropolis'.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "connection_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the connection id of the tenant.",
+ },
+ "connector_group_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the connector group id of the connector groups.",
+ },
+ "custom_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the user provided custom name of the Protection Source.",
+ },
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the environment (such as 'kVMware' or 'kSQL') where the Protection Source exists. Depending on the environment, one of the following Protection Sources are initialized.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies an id of the Protection Source.",
+ },
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a name of the Protection Source.",
+ },
+ "parent_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies an id of the parent of the Protection Source.",
+ },
+ "physical_protection_source": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies a Protection Source in a Physical environment.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "agents": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifiles the agents running on the Physical Protection Source and the status information.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "cbmr_version": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the version if Cristie BMR product is installed on the host.",
+ },
+ "file_cbt_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "CBT version and service state info.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "file_version": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Subcomponent version. The interpretation of the version is based on operating system.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "build_ver": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ "major_ver": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ "minor_ver": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ "revision_num": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ },
+ },
+ },
+ "is_installed": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Indicates whether the cbt driver is installed.",
+ },
+ "reboot_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Indicates whether host is rebooted post VolCBT installation.",
+ },
+ "service_state": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Structure to Hold Service Status.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "state": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "host_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the host type where the agent is running. This is only set for persistent agents.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the agent's id.",
+ },
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the agent's name.",
+ },
+ "oracle_multi_node_channel_supported": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether oracle multi node multi channel is supported or not.",
+ },
+ "registration_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies information about a registered Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "access_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the parameters required to establish a connection with a particular environment.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "connection_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "ID of the Bifrost (HyX or Rigel) network realm (i.e. a connection) associated with the source.",
+ },
+ "connector_group_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Id of the connector group. Each connector group is collection of Rigel/hyx. Each entity will be tagged with connector group id.",
+ },
+ "endpoint": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specify an IP address or URL of the environment. (such as the IP address of the vCenter Server for a VMware environment).",
+ },
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the environment like VMware, SQL, where the Protection Source exists. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies a Unique id that is generated when the Source is registered. This is a convenience field that is used to maintain an index to different connection params.",
+ },
+ "version": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Version is updated each time the connector parameters are updated. This is used to discard older connector parameters.",
+ },
+ },
+ },
+ },
+ "allowed_ip_addresses": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of IP Addresses on the registered source to be exclusively allowed for doing any type of IO operations.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "authentication_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies an authentication error message. This indicates the given credentials are rejected and the registration of the source is not successful.",
+ },
+ "authentication_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the status of the authenticating to the Protection Source when registering it with Cohesity Cluster.",
+ },
+ "blacklisted_ip_addresses": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "This field is deprecated. Use DeniedIpAddresses instead.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "denied_ip_addresses": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of IP Addresses on the registered source to be denied for doing any type of IO operations.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "environments": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Description: "Return only Protection Sources that match the passed in environment type such as 'kVMware', 'kSQL', 'kView' 'kPhysical', 'kPuppeteer', 'kPure', 'kNetapp', 'kGenericNas', 'kHyperV', 'kAcropolis', or 'kAzure'. For example, set this parameter to 'kVMware' to only return the Sources (and their Object subtrees) found in the 'kVMware' (VMware vCenter Server) environment.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "is_db_authenticated": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if application entity dbAuthenticated or not.",
+ },
+ "is_storage_array_snapshot_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if this source entity has enabled storage array snapshot or not.",
+ },
+ "link_vms_across_vcenter": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the VM linking feature is enabled for this VCenter This means that VMs present in this VCenter which earlier belonged to some other VCenter(also registerd on same cluster) and were migrated, will be linked during EH refresh. This will enable preserving snapshot chains for migrated VMs.",
+ },
+ "minimum_free_space_gb": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minimum free space in GiB of the space expected to be available on the datastore where the virtual disks of the VM being backed up. If the amount of free space(in GiB) is lower than the value given by this field, backup will be aborted. Note that this field is applicable only to 'kVMware' type of environments.",
+ },
+ "minimum_free_space_percent": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minimum free space in percentage of the space expected to be available on the datastore where the virtual disks of the VM being backed up. If the amount of free space(in percentage) is lower than the value given by this field, backup will be aborted. Note that this field is applicable only to 'kVMware' type of environments.",
+ },
+ "password": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies password of the username to access the target source.",
+ },
+ "physical_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the parameters required to register Application Servers running in a Protection Source specific to a physical adapter.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "applications": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the types of applications such as 'kSQL', 'kExchange', 'kAD' running on the Protection Source. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "password": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies password of the username to access the target source.",
+ },
+ "throttling_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the source side throttling configuration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "cpu_throttling_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Configuration Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "fixed_threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Fixed baseline threshold for throttling. This is mandatory for any other throttling type than kNoThrottling.",
+ },
+ "pattern_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Type of the throttling pattern. 'kNoThrottling' indicates that throttling is not in force. 'kBaseThrottling' indicates indicates a constant base level throttling. 'kFixed' indicates a constant base level throttling.",
+ },
+ "throttling_windows": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Window Parameters Definition.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day_time_window": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Window Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "end_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "start_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Throttling threshold applicable in the window.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "network_throttling_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Configuration Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "fixed_threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Fixed baseline threshold for throttling. This is mandatory for any other throttling type than kNoThrottling.",
+ },
+ "pattern_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Type of the throttling pattern. 'kNoThrottling' indicates that throttling is not in force. 'kBaseThrottling' indicates indicates a constant base level throttling. 'kFixed' indicates a constant base level throttling.",
+ },
+ "throttling_windows": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Window Parameters Definition.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day_time_window": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Window Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "end_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "start_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Throttling threshold applicable in the window.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "username": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies username to access the target source.",
+ },
+ },
+ },
+ },
+ "progress_monitor_path": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Captures the current progress and pulse details w.r.t to either the registration or refresh.",
+ },
+ "refresh_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a message if there was any error encountered during the last rebuild of the Protection Source tree. If there was no error during the last rebuild, this field is reset.",
+ },
+ "refresh_time_usecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Unix epoch time (in microseconds) when the Protection Source tree was most recently fetched and built.",
+ },
+ "registered_apps_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies information of the applications registered on this protection source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "authentication_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "pecifies an authentication error message. This indicates the given credentials are rejected and the registration of the application is not successful.",
+ },
+ "authentication_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the status of authenticating to the Protection Source when registering this application with Cohesity Cluster. If the status is 'kFinished' and there is no error, registration is successful. Specifies the status of the authentication during the registration of a Protection Source. 'kPending' indicates the authentication is in progress. 'kScheduled' indicates the authentication is scheduled. 'kFinished' indicates the authentication is completed. 'kRefreshInProgress' indicates the refresh is in progress.",
+ },
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the application environment. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ },
+ "host_settings_check_results": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of check results internally performed to verify status of various services such as 'AgnetRunning', 'SQLWriterRunning' etc.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "check_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of the check internally performed. Specifies the type of the host check performed internally. 'kIsAgentPortAccessible' indicates the check for agent port access. 'kIsAgentRunning' indicates the status for the Cohesity agent service. 'kIsSQLWriterRunning' indicates the status for SQLWriter service. 'kAreSQLInstancesRunning' indicates the run status for all the SQL instances in the host. 'kCheckServiceLoginsConfig' checks the privileges and sysadmin status of the logins used by the SQL instance services, Cohesity agent service and the SQLWriter service. 'kCheckSQLFCIVIP' checks whether the SQL FCI is registered with a valid VIP or FQDN. 'kCheckSQLDiskSpace' checks whether volumes containing SQL DBs have at least 10% free space.",
+ },
+ "result_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of the result returned after performing the internal host check. Specifies the type of the host check result performed internally. 'kPass' indicates that the respective check was successful. 'kFail' indicates that the respective check failed as some mandatory setting is not met 'kWarning' indicates that the respective check has warning as certain non-mandatory setting is not met.",
+ },
+ "user_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a descriptive message for failed/warning types.",
+ },
+ },
+ },
+ },
+ "refresh_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a message if there was any error encountered during the last rebuild of the application tree. If there was no error during the last rebuild, this field is reset.",
+ },
+ },
+ },
+ },
+ "registration_time_usecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Unix epoch time (in microseconds) when the Protection Source was registered.",
+ },
+ "subnets": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of subnets added during creation or updation of vmare source. Currently, this field will only be populated in case of VMware registration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "component": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Component that has reserved the subnet.",
+ },
+ "description": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Description of the subnet.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "ID of the subnet.",
+ },
+ "ip": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies either an IPv6 address or an IPv4 address.",
+ },
+ "netmask_bits": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "netmaskBits.",
+ },
+ "netmask_ip4": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the netmask using an IP4 address. The netmask can only be set using netmaskIp4 if the IP address is an IPv4 address.",
+ },
+ "nfs_access": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Component that has reserved the subnet.",
+ },
+ "nfs_all_squash": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether all clients from this subnet can map view with view_all_squash_uid/view_all_squash_gid configured in the view.",
+ },
+ "nfs_root_squash": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether clients from this subnet can mount as root on NFS.",
+ },
+ "s3_access": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies whether clients from this subnet can access using S3 protocol. Protocol access level. 'kDisabled' indicates Protocol access level 'Disabled' 'kReadOnly' indicates Protocol access level 'ReadOnly' 'kReadWrite' indicates Protocol access level 'ReadWrite'.",
+ },
+ "smb_access": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies whether clients from this subnet can mount using SMB protocol. Protocol access level. 'kDisabled' indicates Protocol access level 'Disabled' 'kReadOnly' indicates Protocol access level 'ReadOnly' 'kReadWrite' indicates Protocol access level 'ReadWrite'.",
+ },
+ "tenant_id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the unique id of the tenant.",
+ },
+ },
+ },
+ },
+ "throttling_policy": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the throttling policy for a registered Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "enforce_max_streams": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether datastore streams are configured for all datastores that are part of the registered entity. If set to true, number of streams from Cohesity cluster to the registered entity will be limited to the value set for maxConcurrentStreams. If not set or set to false, there is no max limit for the number of concurrent streams.",
+ },
+ "enforce_registered_source_max_backups": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether no. of backups are configured for the registered entity. If set to true, number of backups made by Cohesity cluster in the registered entity will be limited to the value set for RegisteredSourceMaxConcurrentBackups. If not set or set to false, there is no max limit for the number of concurrent backups.",
+ },
+ "is_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Indicates whether read operations to the datastores, which are part of the registered Protection Source, are throttled.",
+ },
+ "latency_thresholds": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies latency thresholds that trigger throttling for all datastores found in the registered Protection Source or specific to one datastore.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "active_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, existing backup tasks using the datastore are throttled.",
+ },
+ "new_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, then new backup tasks using the datastore will not be started.",
+ },
+ },
+ },
+ },
+ "max_concurrent_streams": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of streams Cohesity cluster will make concurrently to the datastores of the registered entity. This limit is enforced only when the flag enforceMaxStreams is set to true.",
+ },
+ "nas_source_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the NAS specific source throttling parameters during source registration or during backup of the source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_parallel_metadata_fetch_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during full backup of the source.",
+ },
+ "max_parallel_metadata_fetch_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during incremental backup of the source.",
+ },
+ "max_parallel_read_write_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during full backup of the source.",
+ },
+ "max_parallel_read_write_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during incremental backup of the source.",
+ },
+ },
+ },
+ },
+ "registered_source_max_concurrent_backups": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of backups Cohesity cluster will make concurrently to the registered entity. This limit is enforced only when the flag enforceRegisteredSourceMaxBackups is set to true.",
+ },
+ "storage_array_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Configuration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "storage_array_snapshot_max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ "storage_array_snapshot_throttling_policies": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies throttling policies configured for individual volume/lun.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the volume id of the storage array snapshot config.",
+ },
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "max_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Snapshots Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshots": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ "max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
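+ // Per-datastore overrides follow: each entry pairs a datastore id/name with a throttling_policy block that mirrors the registered-source-level policy defined above.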
+ "throttling_policy_overrides": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies throttling policy override for a Datastore in a registered entity.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "datastore_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Protection Source id of the Datastore.",
+ },
+ "datastore_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the display name of the Datastore.",
+ },
+ "throttling_policy": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the throttling policy for a registered Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "enforce_max_streams": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether datastore streams are configured for all datastores that are part of the registered entity. If set to true, number of streams from Cohesity cluster to the registered entity will be limited to the value set for maxConcurrentStreams. If not set or set to false, there is no max limit for the number of concurrent streams.",
+ },
+ "enforce_registered_source_max_backups": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether no. of backups are configured for the registered entity. If set to true, number of backups made by Cohesity cluster in the registered entity will be limited to the value set for RegisteredSourceMaxConcurrentBackups. If not set or set to false, there is no max limit for the number of concurrent backups.",
+ },
+ "is_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Indicates whether read operations to the datastores, which are part of the registered Protection Source, are throttled.",
+ },
+ "latency_thresholds": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies latency thresholds that trigger throttling for all datastores found in the registered Protection Source or specific to one datastore.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "active_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, existing backup tasks using the datastore are throttled.",
+ },
+ "new_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, then new backup tasks using the datastore will not be started.",
+ },
+ },
+ },
+ },
+ "max_concurrent_streams": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of streams Cohesity cluster will make concurrently to the datastores of the registered entity. This limit is enforced only when the flag enforceMaxStreams is set to true.",
+ },
+ "nas_source_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the NAS specific source throttling parameters during source registration or during backup of the source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_parallel_metadata_fetch_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during full backup of the source.",
+ },
+ "max_parallel_metadata_fetch_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during incremental backup of the source.",
+ },
+ "max_parallel_read_write_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during full backup of the source.",
+ },
+ "max_parallel_read_write_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during incremental backup of the source.",
+ },
+ },
+ },
+ },
+ "registered_source_max_concurrent_backups": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of backups Cohesity cluster will make concurrently to the registered entity. This limit is enforced only when the flag enforceRegisteredSourceMaxBackups is set to true.",
+ },
+ "storage_array_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Configuration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "storage_array_snapshot_max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ "storage_array_snapshot_throttling_policies": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies throttling policies configured for individual volume/lun.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the volume id of the storage array snapshot config.",
+ },
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "max_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Snapshots Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshots": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ "max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "use_o_auth_for_exchange_online": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether OAuth should be used for authentication in case of Exchange Online.",
+ },
+ "use_vm_bios_uuid": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if registered vCenter is using BIOS UUID to track virtual machines.",
+ },
+ "user_messages": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the additional details encountered during registration. Though the registration may succeed, user messages imply the host environment requires some cleanup or fixing.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "username": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies username to access the target source.",
+ },
+ "vlan_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the VLAN configuration for Recovery.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "vlan": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the VLAN to use for mounting Cohesity's view on the remote host. If specified, Cohesity hostname or the IP address on this VLAN is used.",
+ },
+ "disable_vlan": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether to use the VIPs even when VLANs are configured on the Cluster. If configured, VLAN IP addresses are used by default. If VLANs are not configured, this flag is ignored. Set this flag to true to force using the partition VIPs when VLANs are configured on the Cluster.",
+ },
+ "interface_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the physical interface group name to use for mounting Cohesity's view on the remote host. If specified, Cohesity hostname or the IP address on this VLAN is used.",
+ },
+ },
+ },
+ },
+ "warning_messages": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies a list of warnings encountered during registration. Though the registration may succeed, warning messages imply the host environment requires some cleanup or fixing.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ },
+ },
+ },
+ "source_side_dedup_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether source side dedup is enabled or not.",
+ },
+ "status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the agent status. Specifies the status of the agent running on a physical source.",
+ },
+ "status_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies additional details about the agent status.",
+ },
+ "upgradability": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the upgradability of the agent running on the physical server. Specifies the upgradability of the agent running on the physical server.",
+ },
+ "upgrade_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the status of the upgrade of the agent on a physical server. Specifies the status of the upgrade of the agent on a physical server.",
+ },
+ "upgrade_status_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies detailed message about the agent upgrade failure. This field is not set for successful upgrade.",
+ },
+ "version": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the version of the Agent software.",
+ },
+ "vol_cbt_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "CBT version and service state info.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "file_version": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Subcomponent version. The interpretation of the version is based on operating system.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "build_ver": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ "major_ver": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ "minor_ver": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ "revision_num": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ },
+ },
+ },
+ },
+ "is_installed": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Indicates whether the cbt driver is installed.",
+ },
+ "reboot_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Indicates whether host is rebooted post VolCBT installation.",
+ },
+ "service_state": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Structure to Hold Service Status.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ "state": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
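+ // The fields below describe the physical host itself (identity, networking, volumes, VSS writers) rather than the agents running on it.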
+ "cluster_source_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of cluster resource this source represents.",
+ },
+ "host_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the hostname.",
+ },
+ "host_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the environment type for the host.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies an id for an object that is unique across Cohesity Clusters. The id is composite of all the ids listed below.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "cluster_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Cohesity Cluster id where the object was created.",
+ },
+ "cluster_incarnation_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies an id for the Cohesity Cluster that is generated when a Cohesity Cluster is initially created.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies a unique id assigned to an object (such as a Job) by the Cohesity Cluster.",
+ },
+ },
+ },
+ },
+ "is_proxy_host": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the physical host is a proxy host.",
+ },
+ "memory_size_bytes": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the total memory on the host in bytes.",
+ },
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a human readable name of the Protection Source.",
+ },
+ "networking_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the struct containing information about network addresses configured on the given box. This is needed for dealing with Windows/Oracle Cluster resources that we discover and protect automatically.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "resource_vec": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "The list of resources on the system that are accessible by an IP address.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "endpoints": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "The endpoints by which the resource is accessible.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "fqdn": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The Fully Qualified Domain Name.",
+ },
+ "ipv4_addr": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The IPv4 address.",
+ },
+ "ipv6_addr": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The IPv6 address.",
+ },
+ },
+ },
+ },
+ "type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The type of the resource.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "num_processors": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the number of processors on the host.",
+ },
+ "os_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a human readable name of the OS of the Protection Source.",
+ },
+ "type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of managed Object in a Physical Protection Source. 'kGroup' indicates the EH container.",
+ },
+ "vcs_version": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies cluster version for VCS host.",
+ },
+ "volumes": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Array of Physical Volumes. Specifies the volumes available on the physical host. These fields are populated only for the kPhysicalHost type.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "device_path": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the path to the device that hosts the volume locally.",
+ },
+ "guid": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies an id for the Physical Volume.",
+ },
+ "is_boot_volume": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether the volume is boot volume.",
+ },
+ "is_extended_attributes_supported": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether this volume supports extended attributes (like ACLs) when performing file backups.",
+ },
+ "is_protected": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if a volume is protected by a Job.",
+ },
+ "is_shared_volume": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether the volume is shared volume.",
+ },
+ "label": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a volume label that can be used for displaying additional identifying information about a volume.",
+ },
+ "logical_size_bytes": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the logical size of the volume in bytes that is not reduced by change-block tracking, compression and deduplication.",
+ },
+ "mount_points": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the mount points where the volume is mounted, for example- 'C:', '/mnt/foo' etc.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "mount_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies mount type of volume e.g. nfs, autofs, ext4 etc.",
+ },
+ "network_path": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the full path to connect to the network attached volume. For example, (IP or hostname):/path/to/share for NFS volumes).",
+ },
+ "used_size_bytes": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the size used by the volume in bytes.",
+ },
+ },
+ },
+ },
+ "vsswriters": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies vss writer information about a Physical Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "is_writer_excluded": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "If true, the writer will be excluded by default.",
+ },
+ "writer_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the name of the writer.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
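+ // sql_protection_source describes one SQL Server instance or database discovered under this Protection Source.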
+ "sql_protection_source": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies an Object representing one SQL Server instance or database.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "is_available_for_vss_backup": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether the database is marked as available for backup according to the SQL Server VSS writer. This may be false if either the state of the databases is not online, or if the VSS writer is not online. This field is set only for type 'kDatabase'.",
+ },
+ "created_timestamp": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the time when the database was created. It is displayed in the timezone of the SQL server on which this database is running.",
+ },
+ "database_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the database name of the SQL Protection Source, if the type is database.",
+ },
+ "db_aag_entity_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the AAG entity id if the database is part of an AAG. This field is set only for type 'kDatabase'.",
+ },
+ "db_aag_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the name of the AAG if the database is part of an AAG. This field is set only for type 'kDatabase'.",
+ },
+ "db_compatibility_level": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the versions of SQL server that the database is compatible with.",
+ },
+ "db_file_groups": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the information about the set of file groups for this db on the host. This is only set if the type is kDatabase.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "db_files": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the last known information about the set of database files on the host. This field is set only for type 'kDatabase'.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "file_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the format type of the file that SQL database stores the data. Specifies the format type of the file that SQL database stores the data. 'kRows' refers to a data file 'kLog' refers to a log file 'kFileStream' refers to a directory containing FILESTREAM data 'kNotSupportedType' is for information purposes only. Not supported. 'kFullText' refers to a full-text catalog.",
+ },
+ "full_path": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the full path of the database file on the SQL host machine.",
+ },
+ "size_bytes": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the last known size of the database file.",
+ },
+ },
+ },
+ },
+ "db_owner_username": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the name of the database owner.",
+ },
+ "default_database_location": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the default path for data files for DBs in an instance.",
+ },
+ "default_log_location": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the default path for log files for DBs in an instance.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies a unique id for a SQL Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "created_date_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies a unique identifier generated from the date the database is created or renamed. Cohesity uses this identifier in combination with the databaseId to uniquely identify a database.",
+ },
+ "database_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies a unique id of the database but only for the life of the database. SQL Server may reuse database ids. Cohesity uses the createDateMsecs in combination with this databaseId to uniquely identify a database.",
+ },
+ "instance_id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies unique id for the SQL Server instance. This id does not change during the life of the instance.",
+ },
+ },
+ },
+ },
+ "is_encrypted": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether the database is TDE enabled.",
+ },
+ "name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the instance name of the SQL Protection Source.",
+ },
+ "owner_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the id of the container VM for the SQL Protection Source.",
+ },
+ "recovery_model": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the Recovery Model for the database in SQL environment. Only meaningful for the 'kDatabase' SQL Protection Source. Specifies the Recovery Model set for the Microsoft SQL Server. 'kSimpleRecoveryModel' indicates the Simple SQL Recovery Model which does not utilize log backups. 'kFullRecoveryModel' indicates the Full SQL Recovery Model which requires log backups and allows recovery to a single point in time. 'kBulkLoggedRecoveryModel' indicates the Bulk Logged SQL Recovery Model which requires log backups and allows high-performance bulk copy operations.",
+ },
+ "sql_server_db_state": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "The state of the database as returned by SQL Server. Indicates the state of the database. The values correspond to the 'state' field in the system table sys.databases. See https://goo.gl/P66XqM. 'kOnline' indicates that database is in online state. 'kRestoring' indicates that database is in restore state. 'kRecovering' indicates that database is in recovery state. 'kRecoveryPending' indicates that database recovery is in pending state. 'kSuspect' indicates that primary filegroup is suspect and may be damaged. 'kEmergency' indicates that manually forced emergency state. 'kOffline' indicates that database is in offline state. 'kCopying' indicates that database is in copying state. 'kOfflineSecondary' indicates that secondary database is in offline state.",
+ },
+ "sql_server_instance_version": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Server Instance Version.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "build": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the build.",
+ },
+ "major_version": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the major version.",
+ },
+ "minor_version": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the minor version.",
+ },
+ "revision": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the revision.",
+ },
+ "version_string": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the version string.",
+ },
+ },
+ },
+ },
+ "type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of the managed Object in a SQL Protection Source. Examples of SQL Objects include 'kInstance' and 'kDatabase'. 'kInstance' indicates that SQL server instance is being protected. 'kDatabase' indicates that SQL server database is being protected. 'kAAG' indicates that SQL AAG (AlwaysOn Availability Group) is being protected. 'kAAGRootContainer' indicates that SQL AAG's root container is being protected. 'kRootContainer' indicates root container for SQL sources.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
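+ // registration_info captures how this Source was registered: connection and access info, credential status, allowed/denied IPs, and the throttling policy applied to it. Its nested throttling blocks repeat the structures defined earlier in this schema.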
+ "registration_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies information about a registered Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "access_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the parameters required to establish a connection with a particular environment.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "connection_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "ID of the Bifrost (HyX or Rigel) network realm (i.e. a connection) associated with the source.",
+ },
+ "connector_group_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Id of the connector group. Each connector group is collection of Rigel/hyx. Each entity will be tagged with connector group id.",
+ },
+ "endpoint": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specify an IP address or URL of the environment. (such as the IP address of the vCenter Server for a VMware environment).",
+ },
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the environment like VMware, SQL, where the Protection Source exists. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies a Unique id that is generated when the Source is registered. This is a convenience field that is used to maintain an index to different connection params.",
+ },
+ "version": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Version is updated each time the connector parameters are updated. This is used to discard older connector parameters.",
+ },
+ },
+ },
+ },
+ "allowed_ip_addresses": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of IP Addresses on the registered source to be exclusively allowed for doing any type of IO operations.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "authentication_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies an authentication error message. This indicates the given credentials are rejected and the registration of the source is not successful.",
+ },
+ "authentication_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the status of the authenticating to the Protection Source when registering it with Cohesity Cluster.",
+ },
+ "blacklisted_ip_addresses": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "This field is deprecated. Use DeniedIpAddresses instead.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "denied_ip_addresses": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of IP Addresses on the registered source to be denied for doing any type of IO operations.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "environments": &schema.Schema{
+ Type: schema.TypeList,
+ Optional: true,
+ Description: "Return only Protection Sources that match the passed in environment type such as 'kVMware', 'kSQL', 'kView' 'kPhysical', 'kPuppeteer', 'kPure', 'kNetapp', 'kGenericNas', 'kHyperV', 'kAcropolis', or 'kAzure'. For example, set this parameter to 'kVMware' to only return the Sources (and their Object subtrees) found in the 'kVMware' (VMware vCenter Server) environment.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "is_db_authenticated": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if application entity dbAuthenticated or not.",
+ },
+ "is_storage_array_snapshot_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if this source entity has enabled storage array snapshot or not.",
+ },
+ "link_vms_across_vcenter": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the VM linking feature is enabled for this VCenter This means that VMs present in this VCenter which earlier belonged to some other VCenter(also registerd on same cluster) and were migrated, will be linked during EH refresh. This will enable preserving snapshot chains for migrated VMs.",
+ },
+ "minimum_free_space_gb": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minimum free space in GiB of the space expected to be available on the datastore where the virtual disks of the VM being backed up. If the amount of free space(in GiB) is lower than the value given by this field, backup will be aborted. Note that this field is applicable only to 'kVMware' type of environments.",
+ },
+ "minimum_free_space_percent": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minimum free space in percentage of the space expected to be available on the datastore where the virtual disks of the VM being backed up. If the amount of free space(in percentage) is lower than the value given by this field, backup will be aborted. Note that this field is applicable only to 'kVMware' type of environments.",
+ },
+ "password": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies password of the username to access the target source.",
+ },
+ "physical_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the parameters required to register Application Servers running in a Protection Source specific to a physical adapter.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "applications": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the types of applications such as 'kSQL', 'kExchange', 'kAD' running on the Protection Source. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "password": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies password of the username to access the target source.",
+ },
+ "throttling_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the source side throttling configuration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "cpu_throttling_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Configuration Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "fixed_threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Fixed baseline threshold for throttling. This is mandatory for any other throttling type than kNoThrottling.",
+ },
+ "pattern_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Type of the throttling pattern. 'kNoThrottling' indicates that throttling is not in force. 'kBaseThrottling' indicates indicates a constant base level throttling. 'kFixed' indicates a constant base level throttling.",
+ },
+ "throttling_windows": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Window Parameters Definition.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day_time_window": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Window Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "end_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "start_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Throttling threshold applicable in the window.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "network_throttling_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Configuration Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "fixed_threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Fixed baseline threshold for throttling. This is mandatory for any other throttling type than kNoThrottling.",
+ },
+ "pattern_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Type of the throttling pattern. 'kNoThrottling' indicates that throttling is not in force. 'kBaseThrottling' indicates indicates a constant base level throttling. 'kFixed' indicates a constant base level throttling.",
+ },
+ "throttling_windows": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Throttling Window Parameters Definition.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day_time_window": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Window Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "end_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "start_time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the Day Time Parameters.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "day": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the day of the week (such as 'kMonday') for scheduling throttling. Specifies a day in a week such as 'kSunday', 'kMonday', etc.",
+ },
+ "time": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the time in hours and minutes.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "hour": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the hour of this time.",
+ },
+ "minute": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the minute of this time.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "threshold": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Throttling threshold applicable in the window.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "username": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies username to access the target source.",
+ },
+ },
+ },
+ },
+ "progress_monitor_path": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Captures the current progress and pulse details w.r.t to either the registration or refresh.",
+ },
+ "refresh_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a message if there was any error encountered during the last rebuild of the Protection Source tree. If there was no error during the last rebuild, this field is reset.",
+ },
+ "refresh_time_usecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Unix epoch time (in microseconds) when the Protection Source tree was most recently fetched and built.",
+ },
+ "registered_apps_info": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies information of the applications registered on this protection source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "authentication_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "pecifies an authentication error message. This indicates the given credentials are rejected and the registration of the application is not successful.",
+ },
+ "authentication_status": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the status of authenticating to the Protection Source when registering this application with Cohesity Cluster. If the status is 'kFinished' and there is no error, registration is successful. Specifies the status of the authentication during the registration of a Protection Source. 'kPending' indicates the authentication is in progress. 'kScheduled' indicates the authentication is scheduled. 'kFinished' indicates the authentication is completed. 'kRefreshInProgress' indicates the refresh is in progress.",
+ },
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the application environment. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc.",
+ },
+ "host_settings_check_results": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of check results internally performed to verify status of various services such as 'AgnetRunning', 'SQLWriterRunning' etc.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "check_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of the check internally performed. Specifies the type of the host check performed internally. 'kIsAgentPortAccessible' indicates the check for agent port access. 'kIsAgentRunning' indicates the status for the Cohesity agent service. 'kIsSQLWriterRunning' indicates the status for SQLWriter service. 'kAreSQLInstancesRunning' indicates the run status for all the SQL instances in the host. 'kCheckServiceLoginsConfig' checks the privileges and sysadmin status of the logins used by the SQL instance services, Cohesity agent service and the SQLWriter service. 'kCheckSQLFCIVIP' checks whether the SQL FCI is registered with a valid VIP or FQDN. 'kCheckSQLDiskSpace' checks whether volumes containing SQL DBs have at least 10% free space.",
+ },
+ "result_type": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the type of the result returned after performing the internal host check. Specifies the type of the host check result performed internally. 'kPass' indicates that the respective check was successful. 'kFail' indicates that the respective check failed as some mandatory setting is not met 'kWarning' indicates that the respective check has warning as certain non-mandatory setting is not met.",
+ },
+ "user_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a descriptive message for failed/warning types.",
+ },
+ },
+ },
+ },
+ "refresh_error_message": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies a message if there was any error encountered during the last rebuild of the application tree. If there was no error during the last rebuild, this field is reset.",
+ },
+ },
+ },
+ },
+ "registration_time_usecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Unix epoch time (in microseconds) when the Protection Source was registered.",
+ },
+ "subnets": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the list of subnets added during creation or updation of vmare source. Currently, this field will only be populated in case of VMware registration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "component": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Component that has reserved the subnet.",
+ },
+ "description": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Description of the subnet.",
+ },
+ "id": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "ID of the subnet.",
+ },
+ "ip": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies either an IPv6 address or an IPv4 address.",
+ },
+ "netmask_bits": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "netmaskBits.",
+ },
+ "netmask_ip4": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the netmask using an IP4 address. The netmask can only be set using netmaskIp4 if the IP address is an IPv4 address.",
+ },
+ "nfs_access": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Component that has reserved the subnet.",
+ },
+ "nfs_all_squash": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether all clients from this subnet can map view with view_all_squash_uid/view_all_squash_gid configured in the view.",
+ },
+ "nfs_root_squash": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether clients from this subnet can mount as root on NFS.",
+ },
+ "s3_access": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies whether clients from this subnet can access using S3 protocol. Protocol access level. 'kDisabled' indicates Protocol access level 'Disabled' 'kReadOnly' indicates Protocol access level 'ReadOnly' 'kReadWrite' indicates Protocol access level 'ReadWrite'.",
+ },
+ "smb_access": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies whether clients from this subnet can mount using SMB protocol. Protocol access level. 'kDisabled' indicates Protocol access level 'Disabled' 'kReadOnly' indicates Protocol access level 'ReadOnly' 'kReadWrite' indicates Protocol access level 'ReadWrite'.",
+ },
+ "tenant_id": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the unique id of the tenant.",
+ },
+ },
+ },
+ },
+ "throttling_policy": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the throttling policy for a registered Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "enforce_max_streams": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether datastore streams are configured for all datastores that are part of the registered entity. If set to true, number of streams from Cohesity cluster to the registered entity will be limited to the value set for maxConcurrentStreams. If not set or set to false, there is no max limit for the number of concurrent streams.",
+ },
+ "enforce_registered_source_max_backups": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether no. of backups are configured for the registered entity. If set to true, number of backups made by Cohesity cluster in the registered entity will be limited to the value set for RegisteredSourceMaxConcurrentBackups. If not set or set to false, there is no max limit for the number of concurrent backups.",
+ },
+ "is_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Indicates whether read operations to the datastores, which are part of the registered Protection Source, are throttled.",
+ },
+ "latency_thresholds": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies latency thresholds that trigger throttling for all datastores found in the registered Protection Source or specific to one datastore.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "active_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, existing backup tasks using the datastore are throttled.",
+ },
+ "new_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, then new backup tasks using the datastore will not be started.",
+ },
+ },
+ },
+ },
+ "max_concurrent_streams": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of streams Cohesity cluster will make concurrently to the datastores of the registered entity. This limit is enforced only when the flag enforceMaxStreams is set to true.",
+ },
+ "nas_source_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the NAS specific source throttling parameters during source registration or during backup of the source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_parallel_metadata_fetch_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during full backup of the source.",
+ },
+ "max_parallel_metadata_fetch_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during incremental backup of the source.",
+ },
+ "max_parallel_read_write_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during full backup of the source.",
+ },
+ "max_parallel_read_write_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during incremental backup of the source.",
+ },
+ },
+ },
+ },
+ "registered_source_max_concurrent_backups": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of backups Cohesity cluster will make concurrently to the registered entity. This limit is enforced only when the flag enforceRegisteredSourceMaxBackups is set to true.",
+ },
+ "storage_array_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Configuration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "storage_array_snapshot_max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ "storage_array_snapshot_throttling_policies": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies throttling policies configured for individual volume/lun.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the volume id of the storage array snapshot config.",
+ },
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "max_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Snapshots Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshots": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ "max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "throttling_policy_overrides": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies throttling policy override for a Datastore in a registered entity.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "datastore_id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the Protection Source id of the Datastore.",
+ },
+ "datastore_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the display name of the Datastore.",
+ },
+ "throttling_policy": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the throttling policy for a registered Protection Source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "enforce_max_streams": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether datastore streams are configured for all datastores that are part of the registered entity. If set to true, number of streams from Cohesity cluster to the registered entity will be limited to the value set for maxConcurrentStreams. If not set or set to false, there is no max limit for the number of concurrent streams.",
+ },
+ "enforce_registered_source_max_backups": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether no. of backups are configured for the registered entity. If set to true, number of backups made by Cohesity cluster in the registered entity will be limited to the value set for RegisteredSourceMaxConcurrentBackups. If not set or set to false, there is no max limit for the number of concurrent backups.",
+ },
+ "is_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Indicates whether read operations to the datastores, which are part of the registered Protection Source, are throttled.",
+ },
+ "latency_thresholds": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies latency thresholds that trigger throttling for all datastores found in the registered Protection Source or specific to one datastore.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "active_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, existing backup tasks using the datastore are throttled.",
+ },
+ "new_task_msecs": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "If the latency of a datastore is above this value, then new backup tasks using the datastore will not be started.",
+ },
+ },
+ },
+ },
+ "max_concurrent_streams": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of streams Cohesity cluster will make concurrently to the datastores of the registered entity. This limit is enforced only when the flag enforceMaxStreams is set to true.",
+ },
+ "nas_source_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the NAS specific source throttling parameters during source registration or during backup of the source.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_parallel_metadata_fetch_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during full backup of the source.",
+ },
+ "max_parallel_metadata_fetch_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent metadata to be fetched during incremental backup of the source.",
+ },
+ "max_parallel_read_write_full_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during full backup of the source.",
+ },
+ "max_parallel_read_write_incremental_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the percentage value of maximum concurrent IO during incremental backup of the source.",
+ },
+ },
+ },
+ },
+ "registered_source_max_concurrent_backups": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the limit on the number of backups Cohesity cluster will make concurrently to the registered entity. This limit is enforced only when the flag enforceRegisteredSourceMaxBackups is set to true.",
+ },
+ "storage_array_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Configuration.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "storage_array_snapshot_max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ "storage_array_snapshot_throttling_policies": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies throttling policies configured for individual volume/lun.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "id": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the volume id of the storage array snapshot config.",
+ },
+ "is_max_snapshots_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max snapshots config is enabled or not.",
+ },
+ "is_max_space_config_enabled": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if the storage array snapshot max space config is enabled or not.",
+ },
+ "max_snapshot_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Snapshots Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshots": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ "max_space_config": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies Storage Array Snapshot Max Space Config.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "max_snapshot_space_percentage": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Max number of storage snapshots allowed per volume/lun.",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ "use_o_auth_for_exchange_online": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether OAuth should be used for authentication in case of Exchange Online.",
+ },
+ "use_vm_bios_uuid": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies if registered vCenter is using BIOS UUID to track virtual machines.",
+ },
+ "user_messages": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the additional details encountered during registration. Though the registration may succeed, user messages imply the host environment requires some cleanup or fixing.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ "username": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies username to access the target source.",
+ },
+ "vlan_params": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies the VLAN configuration for Recovery.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "vlan": &schema.Schema{
+ Type: schema.TypeFloat,
+ Computed: true,
+ Description: "Specifies the VLAN to use for mounting Cohesity's view on the remote host. If specified, Cohesity hostname or the IP address on this VLAN is used.",
+ },
+ "disable_vlan": &schema.Schema{
+ Type: schema.TypeBool,
+ Computed: true,
+ Description: "Specifies whether to use the VIPs even when VLANs are configured on the Cluster. If configured, VLAN IP addresses are used by default. If VLANs are not configured, this flag is ignored. Set this flag to true to force using the partition VIPs when VLANs are configured on the Cluster.",
+ },
+ "interface_name": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the physical interface group name to use for mounting Cohesity's view on the remote host. If specified, Cohesity hostname or the IP address on this VLAN is used.",
+ },
+ },
+ },
+ },
+ "warning_messages": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Specifies a list of warnings encountered during registration. Though the registration may succeed, warning messages imply the host environment requires some cleanup or fixing.",
+ Elem: &schema.Schema{
+ Type: schema.TypeString,
+ },
+ },
+ },
+ },
+ },
+ "total_downtiered_size_in_bytes": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the total bytes downtiered from the source so far.",
+ },
+ "total_uptiered_size_in_bytes": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the total bytes uptiered to the source so far.",
+ },
+ "unprotected_sources_summary": &schema.Schema{
+ Type: schema.TypeList,
+ Computed: true,
+ Description: "Aggregated information about a node subtree.",
+ Elem: &schema.Resource{
+ Schema: map[string]*schema.Schema{
+ "environment": &schema.Schema{
+ Type: schema.TypeString,
+ Computed: true,
+ Description: "Specifies the environment such as 'kSQL' or 'kVMware', where the Protection Source exists. Supported environment types such as 'kView', 'kSQL', 'kVMware', etc. NOTE 'kPuppeteer' refers to Cohesity's Remote Adapter. 'kVMware' indicates the VMware Protection Source environment. 'kHyperV' indicates the HyperV Protection Source environment. 'kSQL' indicates the SQL Protection Source environment. 'kView' indicates the View Protection Source environment. 'kPuppeteer' indicates the Cohesity's Remote Adapter. 'kPhysical' indicates the physical Protection Source environment. 'kPure' indicates the Pure Storage Protection Source environment. 'kNimble' indicates the Nimble Storage Protection Source environment. 'kAzure' indicates the Microsoft's Azure Protection Source environment. 'kNetapp' indicates the Netapp Protection Source environment. 'kAgent' indicates the Agent Protection Source environment. 'kGenericNas' indicates the Generic Network Attached Storage Protection Source environment. 'kAcropolis' indicates the Acropolis Protection Source environment. 'kPhysicalFiles' indicates the Physical Files Protection Source environment. 'kIbmFlashSystem' indicates the IBM Flash System Protection Source environment. 'kIsilon' indicates the Dell EMC's Isilon Protection Source environment. 'kGPFS' indicates IBM's GPFS Protection Source environment. 'kKVM' indicates the KVM Protection Source environment. 'kAWS' indicates the AWS Protection Source environment. 'kExchange' indicates the Exchange Protection Source environment. 'kHyperVVSS' indicates the HyperV VSS Protection Source environment. 'kOracle' indicates the Oracle Protection Source environment. 'kGCP' indicates the Google Cloud Platform Protection Source environment. 'kFlashBlade' indicates the Flash Blade Protection Source environment. 'kAWSNative' indicates the AWS Native Protection Source environment. 'kO365' indicates the Office 365 Protection Source environment. 'kO365Outlook' indicates Office 365 outlook Protection Source environment. 'kHyperFlex' indicates the Hyper Flex Protection Source environment. 'kGCPNative' indicates the GCP Native Protection Source environment. 'kAzureNative' indicates the Azure Native Protection Source environment. 'kKubernetes' indicates a Kubernetes Protection Source environment. 'kElastifile' indicates Elastifile Protection Source environment. 'kAD' indicates Active Directory Protection Source environment. 'kRDSSnapshotManager' indicates AWS RDS Protection Source environment. 'kCassandra' indicates Cassandra Protection Source environment. 'kMongoDB' indicates MongoDB Protection Source environment. 'kCouchbase' indicates Couchbase Protection Source environment. 'kHdfs' indicates Hdfs Protection Source environment. 'kHive' indicates Hive Protection Source environment. 'kHBase' indicates HBase Protection Source environment. 'kUDA' indicates Universal Data Adapter Protection Source environment. 'kO365Teams' indicates the Office365 Teams Protection Source environment. 'kO365Group' indicates the Office365 Groups Protection Source environment. 'kO365Exchange' indicates the Office365 Mailbox Protection Source environment. 'kO365OneDrive' indicates the Office365 OneDrive Protection Source environment. 'kO365Sharepoint' indicates the Office365 SharePoint Protection Source environment. 'kO365PublicFolders' indicates the Office365 PublicFolders Protection Source environment.",
+ },
+ "leaves_count": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the number of leaf nodes under the subtree of this node.",
+ },
+ "total_logical_size": &schema.Schema{
+ Type: schema.TypeInt,
+ Computed: true,
+ Description: "Specifies the total logical size of the data under the subtree of this node.",
+ },
+ },
+ },
+ },
+ },
},
},
"object_protection_info": &schema.Schema{
@@ -11146,6 +22060,17 @@ func dataSourceIbmBackupRecoveryProtectionSourcesRead(context context.Context, d
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
listProtectionSourcesOptions := &backuprecoveryv1.ListProtectionSourcesOptions{}
@@ -11292,6 +22217,7 @@ func dataSourceIbmBackupRecoveryProtectionSourcesRead(context context.Context, d
}
protectionSources = append(protectionSources, protectionSourcesItemMap)
}
+
if err = d.Set("protection_sources", protectionSources); err != nil {
return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting protection_sources: %s", err), "(Data) ibm_backup_recovery_protection_sources", "read", "set-protection_sources").GetDiag()
}
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_sources_test.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_sources_test.go
index 8bfddefb7b..13fef1db05 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_sources_test.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_protection_sources_test.go
@@ -36,6 +36,8 @@ func testAccCheckIbmBackupRecoveryProtectionSourcesDataSourceConfigBasic() strin
return fmt.Sprintf(`
data "ibm_backup_recovery_protection_sources" "backup_recovery_protection_sources_instance"{
x_ibm_tenant_id = "%s"
+
+ backup_recovery_protection_source_nodes_id = 344
}
`, tenantId)
}
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_search_indexed_object.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_search_indexed_object.go
index 3993b94461..ba2631f83d 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_search_indexed_object.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_search_indexed_object.go
@@ -8099,6 +8099,17 @@ func dataSourceIbmBackupRecoverySearchIndexedObjectRead(context context.Context,
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
searchIndexedObjectsOptions := &backuprecoveryv1.SearchIndexedObjectsOptions{}
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_search_indexed_object_test.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_search_indexed_object_test.go
index eb9391ce38..1918a9f1bd 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_search_indexed_object_test.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_search_indexed_object_test.go
@@ -15,7 +15,7 @@ import (
func TestAccIbmBackupRecoverySearchIndexedObjectDataSourceBasic(t *testing.T) {
objectType := "Files"
- objectId := 18
+ objectId := 344
resource.Test(t, resource.TestCase{
PreCheck: func() { acc.TestAccPreCheck(t) },
Providers: acc.TestAccProviders,
@@ -37,6 +37,7 @@ func testAccCheckIbmBackupRecoverySearchIndexedObjectConfigBasic(objectType stri
data "ibm_backup_recovery_search_indexed_object" "baas_search_indexed_object_instance" {
x_ibm_tenant_id = "%s"
+
object_type = "%s"
file_params {
source_ids = [%d]
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_search_objects.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_search_objects.go
index c8955eb8a5..10f1b2c30d 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_search_objects.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_search_objects.go
@@ -1809,6 +1809,17 @@ func dataSourceIbmBackupRecoverySearchObjectsRead(context context.Context, d *sc
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
searchObjectsOptions := &backuprecoveryv1.SearchObjectsOptions{}
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_search_objects_test.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_search_objects_test.go
index 4c62d143a8..4aac66973c 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_search_objects_test.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_search_objects_test.go
@@ -37,6 +37,7 @@ func testAccCheckIbmBackupRecoverySearchObjectsDataSourceConfigBasic() string {
return fmt.Sprintf(`
data "ibm_backup_recovery_search_objects" "baas_search_objects_instance" {
x_ibm_tenant_id = "%s"
+
}
`, tenantId)
}
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_search_protected_objects.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_search_protected_objects.go
index e3494f8494..1a23a8ddaa 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_search_protected_objects.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_search_protected_objects.go
@@ -1292,6 +1292,18 @@ func dataSourceIbmBackupRecoverySearchProtectedObjectsRead(context context.Conte
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
+
searchProtectedObjectsOptions := &backuprecoveryv1.SearchProtectedObjectsOptions{}
searchProtectedObjectsOptions.SetXIBMTenantID(d.Get("x_ibm_tenant_id").(string))
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_search_protected_objects_test.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_search_protected_objects_test.go
index 1fada3b26e..fe2311ca2e 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_search_protected_objects_test.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_search_protected_objects_test.go
@@ -39,6 +39,7 @@ func testAccCheckIbmBackupRecoverySearchProtectedObjectsDataSourceConfigBasic()
return fmt.Sprintf(`
data "ibm_backup_recovery_search_protected_objects" "baas_search_protected_objects_instance" {
x_ibm_tenant_id = "%s"
+
}
`, tenantId)
}
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_source_registration.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_source_registration.go
index 5f4fa63662..732fc41390 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_source_registration.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_source_registration.go
@@ -954,6 +954,17 @@ func dataSourceIbmBackupRecoverySourceRegistrationRead(context context.Context,
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
getProtectionSourceRegistrationOptions := &backuprecoveryv1.GetProtectionSourceRegistrationOptions{}
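The other half of the preamble is getClientWithInstanceEndpoint, which repoints the SDK client before the request options are built. Its implementation is not shown in this diff either; the sketch below is an assumption for illustration only. The URL pattern, the private-endpoint convention, and the (unused) Bluemix session parameter are guesses, not the provider's actual code:

package backuprecovery

import (
	"fmt"
	"log"

	"github.com/IBM-Cloud/bluemix-go/session"
	"github.com/IBM/ibm-backup-recovery-sdk-go/backuprecoveryv1"
)

// Hypothetical sketch: derive an instance- and region-scoped service URL and
// apply it to the shared client. The real helper may instead consult the
// session's endpoint configuration or an endpoints file.
func getClientWithInstanceEndpoint(client *backuprecoveryv1.BackupRecoveryV1, sess *session.Session, instanceId, region, endpointType string) *backuprecoveryv1.BackupRecoveryV1 {
	host := "backup-recovery.cloud.ibm.com" // assumed service domain
	if endpointType == "private" {
		host = "private." + host // assumed private-endpoint convention
	}
	url := fmt.Sprintf("https://%s.%s.%s/v2", instanceId, region, host)
	if err := client.SetServiceURL(url); err != nil {
		log.Printf("[DEBUG] unable to set instance-scoped service URL: %s", err)
	}
	return client
}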
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_source_registration_test.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_source_registration_test.go
index e9edc917f7..d026ce3ac8 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_source_registration_test.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_source_registration_test.go
@@ -19,7 +19,7 @@ import (
func TestAccIbmBackupRecoverySourceRegistrationDataSourceBasic(t *testing.T) {
// environment := "kPhysical"
- objectId := 18
+ objectId := 344
// endpoint := "172.26.1.24"
// hostType := "kLinux"
// physicalType := "kHost"
@@ -57,6 +57,7 @@ func testAccCheckIbmBackupRecoverySourceRegistrationDataSourceConfigBasic(object
return fmt.Sprintf(`
data "ibm_backup_recovery_source_registration" "baas_source_registration_instance" {
+
source_registration_id = %d
x_ibm_tenant_id = "%s"
}
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_source_registrations.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_source_registrations.go
index a5afe1af06..c24b965ca6 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_source_registrations.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_source_registrations.go
@@ -991,6 +991,17 @@ func dataSourceIbmBackupRecoverySourceRegistrationsRead(context context.Context,
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
getSourceRegistrationsOptions := &backuprecoveryv1.GetSourceRegistrationsOptions{}
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_source_registrations_test.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_source_registrations_test.go
index 04563271de..08a1702c9a 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_source_registrations_test.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_source_registrations_test.go
@@ -23,7 +23,7 @@ func TestAccIbmBackupRecoverySourceRegistrationsDataSourceBasic(t *testing.T) {
// hostType := "kLinux"
// physicalType := "kHost"
- objectId := 18
+ objectId := 344
resource.Test(t, resource.TestCase{
PreCheck: func() { acc.TestAccPreCheck(t) },
Providers: acc.TestAccProviders,
@@ -63,6 +63,7 @@ func testAccCheckIbmBackupRecoverySourceRegistrationsDataSourceConfigBasic(objec
return fmt.Sprintf(`
data "ibm_backup_recovery_source_registrations" "baas_source_registrations_instance" {
+
ids = [%d]
x_ibm_tenant_id = "%s"
}
diff --git a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_test.go b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_test.go
index b89fd9dce4..3cce259af5 100644
--- a/ibm/service/backuprecovery/data_source_ibm_backup_recovery_test.go
+++ b/ibm/service/backuprecovery/data_source_ibm_backup_recovery_test.go
@@ -21,9 +21,9 @@ import (
func TestAccIbmBackupRecoveryDataSourceBasic(t *testing.T) {
name := fmt.Sprintf("tf_recovery_name_%d", acctest.RandIntRange(10, 100))
snapshotEnvironment := "kPhysical"
- objectId := 18
+ objectId := 344
targetenvironment := "kPhysical"
- absolutePath := "/data/"
+ absolutePath := "/mnt"
restoreEntityType := "kRegular"
recoveryAction := "RecoverFiles"
@@ -50,12 +50,14 @@ func testAccCheckIbmBackupRecoveryDataSourceConfigBasic(objectId int, name, snap
data "ibm_backup_recovery_object_snapshots" "object_snapshot" {
x_ibm_tenant_id = "%s"
+
object_id = %d
}
resource "ibm_backup_recovery" "baas_recovery_instance" {
x_ibm_tenant_id = "%s"
snapshot_environment = "%s"
+
name = "%s"
physical_params {
recovery_action = "%s"
@@ -80,6 +82,7 @@ func testAccCheckIbmBackupRecoveryDataSourceConfigBasic(objectId int, name, snap
data "ibm_backup_recovery" "baas_recovery_instance" {
recovery_id = ibm_backup_recovery.baas_recovery_instance.recovery_id
+
x_ibm_tenant_id = "%[1]s"
}
diff --git a/ibm/service/backuprecovery/resource_ibm_backup_recovery.go b/ibm/service/backuprecovery/resource_ibm_backup_recovery.go
index 72ecd5b529..240bd57057 100644
--- a/ibm/service/backuprecovery/resource_ibm_backup_recovery.go
+++ b/ibm/service/backuprecovery/resource_ibm_backup_recovery.go
@@ -2423,6 +2423,18 @@ func resourceIbmBackupRecoveryCreate(context context.Context, d *schema.Resource
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
+
createRecoveryOptions := &backuprecoveryv1.CreateRecoveryOptions{}
tenantId := d.Get("x_ibm_tenant_id").(string)
createRecoveryOptions.SetXIBMTenantID(tenantId)
@@ -2466,6 +2478,17 @@ func resourceIbmBackupRecoveryRead(context context.Context, d *schema.ResourceDa
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
getRecoveryByIdOptions := &backuprecoveryv1.GetRecoveryByIdOptions{}
tenantId := d.Get("x_ibm_tenant_id").(string)
@@ -2488,6 +2511,22 @@ func resourceIbmBackupRecoveryRead(context context.Context, d *schema.ResourceDa
return tfErr.GetDiag()
}
+ if instanceId != "" {
+ if err := d.Set("instance_id", instanceId); err != nil {
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting instance_id: %s", err), "(Resource) ibm_backup_recovery_recovery", "read", "set-backup-recovery-instance-id").GetDiag()
+ }
+ }
+ if region != "" {
+ if err := d.Set("region", region); err != nil {
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting region: %s", err), "(Resource) ibm_backup_recovery_recovery", "read", "set-backup-recovery-region").GetDiag()
+ }
+ }
+
+ if err = d.Set("endpoint_type", d.Get("endpoint_type").(string)); err != nil {
+ err = fmt.Errorf("Error setting endpoint_type: %s", err)
+ return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_backup_recovery_recovery", "read", "set-endpoint-type").GetDiag()
+ }
+
if err = d.Set("name", recovery.Name); err != nil {
err = fmt.Errorf("Error setting name: %s", err)
return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_backup_recovery_recovery", "read", "set-name").GetDiag()
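On the read path the resource now also writes instance_id, region, and endpoint_type back into state so that subsequent plans keep resolving the same instance endpoint. The same three d.Set blocks recur in the other resources below; purely as a hypothetical refactor sketch (not part of this PR), they could be folded into a single helper:

package backuprecovery

import (
	"fmt"

	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)

// Hypothetical consolidation of the repeated d.Set blocks; shown only for
// illustration. The PR keeps them inline in each Read function.
func setInstanceConnectionAttributes(d *schema.ResourceData, instanceId, region string) error {
	if instanceId != "" {
		if err := d.Set("instance_id", instanceId); err != nil {
			return fmt.Errorf("error setting instance_id: %s", err)
		}
	}
	if region != "" {
		if err := d.Set("region", region); err != nil {
			return fmt.Errorf("error setting region: %s", err)
		}
	}
	if err := d.Set("endpoint_type", d.Get("endpoint_type").(string)); err != nil {
		return fmt.Errorf("error setting endpoint_type: %s", err)
	}
	return nil
}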
diff --git a/ibm/service/backuprecovery/resource_ibm_backup_recovery_agent_upgrade_task.go b/ibm/service/backuprecovery/resource_ibm_backup_recovery_agent_upgrade_task.go
index 0b1c70f308..5be9c5190c 100644
--- a/ibm/service/backuprecovery/resource_ibm_backup_recovery_agent_upgrade_task.go
+++ b/ibm/service/backuprecovery/resource_ibm_backup_recovery_agent_upgrade_task.go
@@ -248,6 +248,17 @@ func resourceIbmBackupRecoveryAgentUpgradeTaskCreate(context context.Context, d
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
createUpgradeTaskOptions := &backuprecoveryv1.CreateUpgradeTaskOptions{}
@@ -296,6 +307,17 @@ func resourceIbmBackupRecoveryAgentUpgradeTaskRead(context context.Context, d *s
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
getUpgradeTasksOptions := &backuprecoveryv1.GetUpgradeTasksOptions{}
@@ -319,6 +341,22 @@ func resourceIbmBackupRecoveryAgentUpgradeTaskRead(context context.Context, d *s
return tfErr.GetDiag()
}
+ if instanceId != "" {
+ if err := d.Set("instance_id", instanceId); err != nil {
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting instance_id: %s", err), "(Resource) ibm_backup_recovery_agent_upgrade_task", "read", "set-instance-id").GetDiag()
+ }
+ }
+ if region != "" {
+ if err := d.Set("region", region); err != nil {
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting region: %s", err), "(Resource) ibm_backup_recovery_agent_upgrade_task", "read", "set--region").GetDiag()
+ }
+ }
+
+ if err = d.Set("endpoint_type", d.Get("endpoint_type").(string)); err != nil {
+ err = fmt.Errorf("Error setting endpoint_type: %s", err)
+ return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_backup_recovery_agent_upgrade_task", "read", "set-endpoint-type").GetDiag()
+ }
+
if !core.IsNil(agentUpgradeTaskStates.Tasks[0].AgentIDs) {
agentIDs := []interface{}{}
for _, agentIDsItem := range agentUpgradeTaskStates.Tasks[0].AgentIDs {
diff --git a/ibm/service/backuprecovery/resource_ibm_backup_recovery_agent_upgrade_task_test.go b/ibm/service/backuprecovery/resource_ibm_backup_recovery_agent_upgrade_task_test.go
index 5020fe00cc..76aeca058a 100644
--- a/ibm/service/backuprecovery/resource_ibm_backup_recovery_agent_upgrade_task_test.go
+++ b/ibm/service/backuprecovery/resource_ibm_backup_recovery_agent_upgrade_task_test.go
@@ -20,7 +20,7 @@ import (
func TestAccIbmBackupRecoveryAgentUpgradeTaskBasic(t *testing.T) {
var conf backuprecoveryv1.AgentUpgradeTaskStates
name := fmt.Sprintf("tf_name_upgarde_task_%d", acctest.RandIntRange(10, 100))
- agentId := 19
+ agentId := 346
resource.Test(t, resource.TestCase{
PreCheck: func() { acc.TestAccPreCheck(t) },
@@ -43,6 +43,7 @@ func testAccCheckIbmBackupRecoveryAgentUpgradeTaskConfigBasic(name string, agent
return fmt.Sprintf(`
resource "ibm_backup_recovery_agent_upgrade_task" "baas_agent_upgrade_task_instance" {
x_ibm_tenant_id = "%s"
+
agent_ids = [%d]
name = "%s"
}
diff --git a/ibm/service/backuprecovery/resource_ibm_backup_recovery_data_source_connection.go b/ibm/service/backuprecovery/resource_ibm_backup_recovery_data_source_connection.go
index 58226a6d1f..c07fa12f98 100644
--- a/ibm/service/backuprecovery/resource_ibm_backup_recovery_data_source_connection.go
+++ b/ibm/service/backuprecovery/resource_ibm_backup_recovery_data_source_connection.go
@@ -80,6 +80,17 @@ func resourceIbmBackupRecoveryDataSourceConnectionCreate(context context.Context
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
createDataSourceConnectionOptions := &backuprecoveryv1.CreateDataSourceConnectionOptions{}
@@ -109,6 +120,17 @@ func resourceIbmBackupRecoveryDataSourceConnectionRead(context context.Context,
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
tenantId := d.Get("x_ibm_tenant_id").(string)
connectionId := d.Id()
@@ -132,6 +154,22 @@ func resourceIbmBackupRecoveryDataSourceConnectionRead(context context.Context,
return tfErr.GetDiag()
}
+ if instanceId != "" {
+ if err := d.Set("instance_id", instanceId); err != nil {
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting instance_id: %s", err), "(Resource) ibm_backup_recovery_data_source_connection", "read", "set-instance-id").GetDiag()
+ }
+ }
+ if region != "" {
+ if err := d.Set("region", region); err != nil {
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting region: %s", err), "(Resource) ibm_backup_recovery_data_source_connection", "read", "set--region").GetDiag()
+ }
+ }
+
+ if err = d.Set("endpoint_type", d.Get("endpoint_type").(string)); err != nil {
+ err = fmt.Errorf("Error setting endpoint_type: %s", err)
+ return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_backup_recovery_data_source_connection", "read", "set-endpoint-type").GetDiag()
+ }
+
if err = d.Set("connection_id", dataSourceConnectionList.Connections[0].ConnectionID); err != nil {
err = fmt.Errorf("Error setting connection_id: %s", err)
return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_backup_recovery_data_source_connection", "read", "set-connection_id").GetDiag()
@@ -188,6 +226,17 @@ func resourceIbmBackupRecoveryDataSourceConnectionUpdate(context context.Context
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
patchDataSourceConnectionOptions := &backuprecoveryv1.PatchDataSourceConnectionOptions{}
tenantId := d.Get("x_ibm_tenant_id").(string)
@@ -225,6 +274,17 @@ func resourceIbmBackupRecoveryDataSourceConnectionDelete(context context.Context
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "delete")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
deleteDataSourceConnectionOptions := &backuprecoveryv1.DeleteDataSourceConnectionOptions{}
tenantId := d.Get("x_ibm_tenant_id").(string)
diff --git a/ibm/service/backuprecovery/resource_ibm_backup_recovery_data_source_connection_test.go b/ibm/service/backuprecovery/resource_ibm_backup_recovery_data_source_connection_test.go
index 86a6f27e83..25bf985448 100644
--- a/ibm/service/backuprecovery/resource_ibm_backup_recovery_data_source_connection_test.go
+++ b/ibm/service/backuprecovery/resource_ibm_backup_recovery_data_source_connection_test.go
@@ -54,6 +54,7 @@ func testAccCheckIbmBackupRecoveryDataSourceConnectionConfigBasic(connectionName
return fmt.Sprintf(`
resource "ibm_backup_recovery_data_source_connection" "baas_data_source_connection_instance" {
x_ibm_tenant_id = "%s"
+
connection_name = "%s"
}
`, tenantId, connectionName)
@@ -95,6 +96,7 @@ func testAccCheckIbmBackupRecoveryDataSourceConnectionDestroy(s *terraform.State
if err != nil {
return err
}
+
for _, rs := range s.RootModule().Resources {
if rs.Type != "ibm_backup_recovery_data_source_connection" {
continue
diff --git a/ibm/service/backuprecovery/resource_ibm_backup_recovery_data_source_connector_patch.go b/ibm/service/backuprecovery/resource_ibm_backup_recovery_data_source_connector_patch.go
index 265b484f01..5c9ad15bb1 100644
--- a/ibm/service/backuprecovery/resource_ibm_backup_recovery_data_source_connector_patch.go
+++ b/ibm/service/backuprecovery/resource_ibm_backup_recovery_data_source_connector_patch.go
@@ -154,6 +154,17 @@ func resourceIbmBackupRecoveryDataSourceConnectorPatchCreate(context context.Con
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
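+ // Route the connector patch through the instance-specific endpoint when instance_id and region are provided.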
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
patchDataSourceConnectorOptions := &backuprecoveryv1.PatchDataSourceConnectorOptions{}
@@ -182,6 +193,17 @@ func resourceIbmBackupRecoveryDataSourceConnectorPatchRead(context context.Conte
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "read")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
getDataSourceConnectorsOptions := &backuprecoveryv1.GetDataSourceConnectorsOptions{}
@@ -199,6 +221,22 @@ func resourceIbmBackupRecoveryDataSourceConnectorPatchRead(context context.Conte
return tfErr.GetDiag()
}
+ if instanceId != "" {
+ if err := d.Set("instance_id", instanceId); err != nil {
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting instance_id: %s", err), "(Resource) ibm_backup_recovery_data_source_connector_patch", "read", "set-instance-id").GetDiag()
+ }
+ }
+ if region != "" {
+ if err := d.Set("region", region); err != nil {
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting region: %s", err), "(Resource) ibm_backup_recovery_data_source_connector_patch", "read", "set-region").GetDiag()
+ }
+ }
+
+ if err = d.Set("endpoint_type", d.Get("endpoint_type").(string)); err != nil {
+ err = fmt.Errorf("Error setting endpoint_type: %s", err)
+ return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_backup_recovery_data_source_connector_patch", "read", "set-endpoint-type").GetDiag()
+ }
+
if !core.IsNil(dataSourceConnectorList.Connectors[0].ConnectorName) {
if err = d.Set("connector_name", dataSourceConnectorList.Connectors[0].ConnectorName); err != nil {
err = fmt.Errorf("Error setting connector_name: %s", err)
diff --git a/ibm/service/backuprecovery/resource_ibm_backup_recovery_download_files_folders.go b/ibm/service/backuprecovery/resource_ibm_backup_recovery_download_files_folders.go
index 7c5520d272..5d91475d8d 100644
--- a/ibm/service/backuprecovery/resource_ibm_backup_recovery_download_files_folders.go
+++ b/ibm/service/backuprecovery/resource_ibm_backup_recovery_download_files_folders.go
@@ -18,7 +18,6 @@ import (
"github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns"
"github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex"
- "github.com/IBM-Cloud/terraform-provider-ibm/ibm/validate"
"github.com/IBM/go-sdk-core/v5/core"
"github.com/IBM/ibm-backup-recovery-sdk-go/backuprecoveryv1"
)
@@ -2855,29 +2854,6 @@ func checkDiffResourceIbmBackupRecoveryDownloadFilesFolders(context context.Cont
return nil
}
-func ResourceIbmBackupRecoveryDownloadFilesFoldersValidator() *validate.ResourceValidator {
- validateSchema := make([]validate.ValidateSchema, 0)
- validateSchema = append(validateSchema,
- validate.ValidateSchema{
- Identifier: "parent_recovery_id",
- ValidateFunctionIdentifier: validate.ValidateRegexp,
- Type: validate.TypeString,
- Optional: true,
- Regexp: `^\d+:\d+:\d+$`,
- },
- validate.ValidateSchema{
- Identifier: "glacier_retrieval_type",
- ValidateFunctionIdentifier: validate.ValidateAllowedStringValue,
- Type: validate.TypeString,
- Optional: true,
- AllowedValues: "kExpeditedNoPCU, kExpeditedWithPCU, kStandard",
- },
- )
-
- resourceValidator := validate.ResourceValidator{ResourceName: "ibm_backup_recovery_recovery_download_files_folders", Schema: validateSchema}
- return &resourceValidator
-}
-
func resourceIbmBackupRecoveryDownloadFilesFoldersCreate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
backupRecoveryClient, err := meta.(conns.ClientSession).BackupRecoveryV1()
if err != nil {
@@ -2885,6 +2861,17 @@ func resourceIbmBackupRecoveryDownloadFilesFoldersCreate(context context.Context
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
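+ // Use an instance-scoped client (built from instance_id, region and endpoint_type) when those attributes are set on the resource.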
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
createDownloadFilesAndFoldersRecoveryOptions := &backuprecoveryv1.CreateDownloadFilesAndFoldersRecoveryOptions{}
@@ -2943,6 +2930,17 @@ func resourceIbmBackupRecoveryDownloadFilesFoldersRead(context context.Context,
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "read")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
getRecoveryByIdOptions := &backuprecoveryv1.GetRecoveryByIdOptions{}
@@ -2962,45 +2960,61 @@ func resourceIbmBackupRecoveryDownloadFilesFoldersRead(context context.Context,
d.SetId(*getRecoveryByIdOptions.ID)
+ if instanceId != "" {
+ if err := d.Set("instance_id", instanceId); err != nil {
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting instance_id: %s", err), "(Resource) ibm_recovery_download_files_folders", "read", "set-instance-id").GetDiag()
+ }
+ }
+ if region != "" {
+ if err := d.Set("region", region); err != nil {
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting region: %s", err), "(Resource) ibm_recovery_download_files_folders", "read", "set-region").GetDiag()
+ }
+ }
+
+ if err = d.Set("endpoint_type", d.Get("endpoint_type").(string)); err != nil {
+ err = fmt.Errorf("Error setting endpoint_type: %s", err)
+ return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_recovery_download_files_folders", "read", "set-endpoint-type").GetDiag()
+ }
+
if !core.IsNil(recovery.Name) {
if err = d.Set("recovery_name", recovery.Name); err != nil {
- return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting name: %s", err), "(Data) ibm_recovery", "read", "set-name").GetDiag()
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting name: %s", err), "(Resource) ibm_recovery_download_files_folders", "read", "set-name").GetDiag()
}
}
if !core.IsNil(recovery.StartTimeUsecs) {
if err = d.Set("recovery_start_time_usecs", flex.IntValue(recovery.StartTimeUsecs)); err != nil {
- return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting start_time_usecs: %s", err), "(Data) ibm_recovery", "read", "set-start_time_usecs").GetDiag()
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting start_time_usecs: %s", err), "(Resource) ibm_recovery_download_files_folders", "read", "set-start_time_usecs").GetDiag()
}
}
if !core.IsNil(recovery.EndTimeUsecs) {
if err = d.Set("recovery_end_time_usecs", flex.IntValue(recovery.EndTimeUsecs)); err != nil {
- return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting end_time_usecs: %s", err), "(Data) ibm_recovery", "read", "set-end_time_usecs").GetDiag()
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting end_time_usecs: %s", err), "(Resource) ibm_recovery_download_files_folders", "read", "set-end_time_usecs").GetDiag()
}
}
if !core.IsNil(recovery.Status) {
if err = d.Set("recovery_status", recovery.Status); err != nil {
- return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting status: %s", err), "(Data) ibm_recovery", "read", "set-status").GetDiag()
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting status: %s", err), "(Resource) ibm_recovery_download_files_folders", "read", "set-status").GetDiag()
}
}
if !core.IsNil(recovery.ProgressTaskID) {
if err = d.Set("recovery_progress_task_id", recovery.ProgressTaskID); err != nil {
- return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting progress_task_id: %s", err), "(Data) ibm_recovery", "read", "set-progress_task_id").GetDiag()
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting progress_task_id: %s", err), "(Resource) ibm_recovery_download_files_folders", "read", "set-progress_task_id").GetDiag()
}
}
if !core.IsNil(recovery.SnapshotEnvironment) {
if err = d.Set("recovery_snapshot_environment", recovery.SnapshotEnvironment); err != nil {
- return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting snapshot_environment: %s", err), "(Data) ibm_recovery", "read", "set-snapshot_environment").GetDiag()
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting snapshot_environment: %s", err), "(Resource) ibm_recovery_download_files_folders", "read", "set-snapshot_environment").GetDiag()
}
}
if !core.IsNil(recovery.RecoveryAction) {
if err = d.Set("recovery_action", recovery.RecoveryAction); err != nil {
- return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting recovery_action: %s", err), "(Data) ibm_recovery", "read", "set-recovery_action").GetDiag()
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting recovery_action: %s", err), "(Resource) ibm_recovery_download_files_folders", "read", "set-recovery_action").GetDiag()
}
}
@@ -3009,12 +3023,12 @@ func resourceIbmBackupRecoveryDownloadFilesFoldersRead(context context.Context,
for _, permissionsItem := range recovery.Permissions {
permissionsItemMap, err := DataSourceIbmBackupRecoveryTenantToMap(&permissionsItem) // #nosec G601
if err != nil {
- return flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Data) ibm_recovery", "read", "permissions-to-map").GetDiag()
+ return flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Resource) ibm_recovery_download_files_folders", "read", "permissions-to-map").GetDiag()
}
permissions = append(permissions, permissionsItemMap)
}
if err = d.Set("recovery_permissions", permissions); err != nil {
- return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting permissions: %s", err), "(Data) ibm_recovery", "read", "set-permissions").GetDiag()
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting permissions: %s", err), "(Resource) ibm_recovery_download_files_folders", "read", "set-permissions").GetDiag()
}
}
@@ -3022,29 +3036,29 @@ func resourceIbmBackupRecoveryDownloadFilesFoldersRead(context context.Context,
creationInfo := []map[string]interface{}{}
creationInfoMap, err := DataSourceIbmBackupRecoveryCreationInfoToMap(recovery.CreationInfo)
if err != nil {
- return flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Data) ibm_recovery", "read", "creation_info-to-map").GetDiag()
+ return flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Resource) ibm_recovery_download_files_folders", "read", "creation_info-to-map").GetDiag()
}
creationInfo = append(creationInfo, creationInfoMap)
if err = d.Set("recovery_creation_info", creationInfo); err != nil {
- return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting creation_info: %s", err), "(Data) ibm_recovery", "read", "set-creation_info").GetDiag()
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting creation_info: %s", err), "(Resource) ibm_recovery_download_files_folders", "read", "set-creation_info").GetDiag()
}
}
if !core.IsNil(recovery.CanTearDown) {
if err = d.Set("recovery_can_tear_down", recovery.CanTearDown); err != nil {
- return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting can_tear_down: %s", err), "(Data) ibm_recovery", "read", "set-can_tear_down").GetDiag()
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting can_tear_down: %s", err), "(Resource) ibm_recovery_download_files_folders", "read", "set-can_tear_down").GetDiag()
}
}
if !core.IsNil(recovery.TearDownStatus) {
if err = d.Set("recovery_tear_down_status", recovery.TearDownStatus); err != nil {
- return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting tear_down_status: %s", err), "(Data) ibm_recovery", "read", "set-tear_down_status").GetDiag()
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting tear_down_status: %s", err), "(Resource) ibm_recovery_download_files_folders", "read", "set-tear_down_status").GetDiag()
}
}
if !core.IsNil(recovery.TearDownMessage) {
if err = d.Set("recovery_tear_down_message", recovery.TearDownMessage); err != nil {
- return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting tear_down_message: %s", err), "(Data) ibm_recovery", "read", "set-tear_down_message").GetDiag()
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting tear_down_message: %s", err), "(Resource) ibm_recovery_download_files_folders", "read", "set-tear_down_message").GetDiag()
}
}
@@ -3054,23 +3068,23 @@ func resourceIbmBackupRecoveryDownloadFilesFoldersRead(context context.Context,
messages = append(messages, messagesItem)
}
if err = d.Set("recovery_messages", messages); err != nil {
- return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting messages: %s", err), "(Data) ibm_recovery", "read", "set-messages").GetDiag()
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting messages: %s", err), "(Resource) ibm_recovery_download_files_folders", "read", "set-messages").GetDiag()
}
} else {
if err = d.Set("recovery_messages", []interface{}{}); err != nil {
- return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting messages: %s", err), "(Data) ibm_recovery", "read", "set-messages").GetDiag()
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting messages: %s", err), "(Resource) ibm_recovery_download_files_folders", "read", "set-messages").GetDiag()
}
}
if !core.IsNil(recovery.IsParentRecovery) {
if err = d.Set("is_parent_recovery", recovery.IsParentRecovery); err != nil {
- return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting is_parent_recovery: %s", err), "(Data) ibm_recovery", "read", "set-is_parent_recovery").GetDiag()
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting is_parent_recovery: %s", err), "(Resource) ibm_recovery_download_files_folders", "read", "set-is_parent_recovery").GetDiag()
}
}
if !core.IsNil(recovery.ParentRecoveryID) {
if err = d.Set("parent_recovery_id", recovery.ParentRecoveryID); err != nil {
- return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting parent_recovery_id: %s", err), "(Data) ibm_recovery", "read", "set-parent_recovery_id").GetDiag()
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting parent_recovery_id: %s", err), "(Resource) ibm_recovery_download_files_folders", "read", "set-parent_recovery_id").GetDiag()
}
}
@@ -3079,22 +3093,22 @@ func resourceIbmBackupRecoveryDownloadFilesFoldersRead(context context.Context,
for _, retrieveArchiveTasksItem := range recovery.RetrieveArchiveTasks {
retrieveArchiveTasksItemMap, err := DataSourceIbmBackupRecoveryRetrieveArchiveTaskToMap(&retrieveArchiveTasksItem) // #nosec G601
if err != nil {
- return flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Data) ibm_recovery", "read", "retrieve_archive_tasks-to-map").GetDiag()
+ return flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Resource) ibm_recovery_download_files_folders", "read", "retrieve_archive_tasks-to-map").GetDiag()
}
retrieveArchiveTasks = append(retrieveArchiveTasks, retrieveArchiveTasksItemMap)
}
if err = d.Set("recovery_retrieve_archive_tasks", retrieveArchiveTasks); err != nil {
- return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting retrieve_archive_tasks: %s", err), "(Data) ibm_recovery", "read", "set-retrieve_archive_tasks").GetDiag()
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting retrieve_archive_tasks: %s", err), "(Resource) ibm_recovery_download_files_folders", "read", "set-retrieve_archive_tasks").GetDiag()
}
} else {
if err = d.Set("recovery_retrieve_archive_tasks", []interface{}{}); err != nil {
- return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting mssql_params: %s", err), "(Data) ibm_recovery", "read", "set-mssql_params").GetDiag()
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting retrieve_archive_tasks: %s", err), "(Resource) ibm_recovery_download_files_folders", "read", "set-retrieve_archive_tasks").GetDiag()
}
}
if !core.IsNil(recovery.IsMultiStageRestore) {
if err = d.Set("recovery_is_multi_stage_restore", recovery.IsMultiStageRestore); err != nil {
- return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting is_multi_stage_restore: %s", err), "(Data) ibm_recovery", "read", "set-is_multi_stage_restore").GetDiag()
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting is_multi_stage_restore: %s", err), "(Resource) ibm_recovery_download_files_folders", "read", "set-is_multi_stage_restore").GetDiag()
}
}
@@ -3102,11 +3116,11 @@ func resourceIbmBackupRecoveryDownloadFilesFoldersRead(context context.Context,
physicalParams := []map[string]interface{}{}
physicalParamsMap, err := DataSourceIbmBackupRecoveryRecoverPhysicalParamsToMap(recovery.PhysicalParams)
if err != nil {
- return flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Data) ibm_recovery", "read", "physical_params-to-map").GetDiag()
+ return flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Resource) ibm_recovery_download_files_folders", "read", "physical_params-to-map").GetDiag()
}
physicalParams = append(physicalParams, physicalParamsMap)
if err = d.Set("recovery_physical_params", physicalParams); err != nil {
- return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting physical_params: %s", err), "(Data) ibm_recovery", "read", "set-physical_params").GetDiag()
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting physical_params: %s", err), "(Resource) ibm_recovery_download_files_folders", "read", "set-physical_params").GetDiag()
}
}
@@ -3114,15 +3128,15 @@ func resourceIbmBackupRecoveryDownloadFilesFoldersRead(context context.Context,
mssqlParams := []map[string]interface{}{}
mssqlParamsMap, err := DataSourceIbmBackupRecoveryRecoverSqlParamsToMap(recovery.MssqlParams)
if err != nil {
- return flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Data) ibm_recovery", "read", "mssql_params-to-map").GetDiag()
+ return flex.DiscriminatedTerraformErrorf(err, err.Error(), "(Resource) ibm_recovery_download_files_folders", "read", "mssql_params-to-map").GetDiag()
}
mssqlParams = append(mssqlParams, mssqlParamsMap)
if err = d.Set("recovery_mssql_params", mssqlParams); err != nil {
- return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting mssql_params: %s", err), "(Data) ibm_recovery", "read", "set-mssql_params").GetDiag()
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting mssql_params: %s", err), "(Resource) ibm_recovery_download_files_folders", "read", "set-mssql_params").GetDiag()
}
} else {
if err = d.Set("recovery_mssql_params", []interface{}{}); err != nil {
- return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting mssql_params: %s", err), "(Data) ibm_recovery", "read", "set-mssql_params").GetDiag()
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting mssql_params: %s", err), "(Resource) ibm_recovery_download_files_folders", "read", "set-mssql_params").GetDiag()
}
}
diff --git a/ibm/service/backuprecovery/resource_ibm_backup_recovery_download_files_folders_test.go b/ibm/service/backuprecovery/resource_ibm_backup_recovery_download_files_folders_test.go
index 753b267296..1fac0c1594 100644
--- a/ibm/service/backuprecovery/resource_ibm_backup_recovery_download_files_folders_test.go
+++ b/ibm/service/backuprecovery/resource_ibm_backup_recovery_download_files_folders_test.go
@@ -19,7 +19,7 @@ import (
func TestAccIbmBackupRecoveryDownloadFilesFoldersBasic(t *testing.T) {
name := fmt.Sprintf("tf_recovery_download_files_folders_name_%d", acctest.RandIntRange(10, 100))
- objectId := 18
+ objectId := 344
resource.Test(t, resource.TestCase{
PreCheck: func() { acc.TestAccPreCheck(t) },
Providers: acc.TestAccProviders,
@@ -47,17 +47,19 @@ func testAccCheckIbmBackupRecoveryDownloadFilesFoldersConfigBasic(name string, o
data "ibm_backup_recovery_object_snapshots" "baas_object_snapshots_instance" {
x_ibm_tenant_id = "%s"
+
object_id = %d
}
resource "ibm_backup_recovery_download_files_folders" "baas_recovery_download_files_folders_instance" {
x_ibm_tenant_id = "%s"
name = "%s"
+
object {
snapshot_id = data.ibm_backup_recovery_object_snapshots.baas_object_snapshots_instance.snapshots[0].id
}
files_and_folders {
- absolute_path = "/data/"
+ absolute_path = "/mnt"
}
}
`, tenantId, objectId, tenantId, name)
diff --git a/ibm/service/backuprecovery/resource_ibm_backup_recovery_perform_action_on_protection_group_run_request.go b/ibm/service/backuprecovery/resource_ibm_backup_recovery_perform_action_on_protection_group_run_request.go
index c601920e4b..0cb1f52ebd 100644
--- a/ibm/service/backuprecovery/resource_ibm_backup_recovery_perform_action_on_protection_group_run_request.go
+++ b/ibm/service/backuprecovery/resource_ibm_backup_recovery_perform_action_on_protection_group_run_request.go
@@ -18,7 +18,6 @@ import (
"github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns"
"github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex"
- "github.com/IBM-Cloud/terraform-provider-ibm/ibm/validate"
"github.com/IBM/go-sdk-core/v5/core"
"github.com/IBM/ibm-backup-recovery-sdk-go/backuprecoveryv1"
)
@@ -148,22 +147,6 @@ func checkDiffResourceIbmBackupRecoveryPerformActionOnProtectionGroupRun(context
return nil
}
-func ResourceIbmBackupRecoveryPerformActionOnProtectionGroupRunRequestValidator() *validate.ResourceValidator {
- validateSchema := make([]validate.ValidateSchema, 0)
- validateSchema = append(validateSchema,
- validate.ValidateSchema{
- Identifier: "action",
- ValidateFunctionIdentifier: validate.ValidateAllowedStringValue,
- Type: validate.TypeString,
- Required: true,
- AllowedValues: "Cancel, Pause, Resume",
- },
- )
-
- resourceValidator := validate.ResourceValidator{ResourceName: "ibm_backup_recovery_perform_action_on_protection_group_run_request", Schema: validateSchema}
- return &resourceValidator
-}
-
func resourceIbmBackupRecoveryPerformActionOnProtectionGroupRunRequestCreate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
backupRecoveryClient, err := meta.(conns.ClientSession).BackupRecoveryV1()
if err != nil {
@@ -171,6 +154,17 @@ func resourceIbmBackupRecoveryPerformActionOnProtectionGroupRunRequestCreate(con
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
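+ // Switch to the instance-specific endpoint before performing the run action, if instance_id and region are set.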
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
performActionOnProtectionGroupRunOptions := &backuprecoveryv1.PerformActionOnProtectionGroupRunOptions{}
performActionOnProtectionGroupRunOptions.SetXIBMTenantID(d.Get("x_ibm_tenant_id").(string))
@@ -222,6 +216,22 @@ func resourceIbmBackupRecoveryPerformActionOnProtectionGroupRunRequestCreate(con
d.SetId(resourceIbmBackupRecoveryProtectionRunActionID(d))
+ if instanceId != "" {
+ if err := d.Set("instance_id", instanceId); err != nil {
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting instance_id: %s", err), "(Resource) ibm_perform_action_on_protection_group_run_request", "read", "set-instance-id").GetDiag()
+ }
+ }
+ if region != "" {
+ if err := d.Set("region", region); err != nil {
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting region: %s", err), "(Resource) ibm_perform_action_on_protection_group_run_request", "read", "set-region").GetDiag()
+ }
+ }
+
+ if err = d.Set("endpoint_type", d.Get("endpoint_type").(string)); err != nil {
+ err = fmt.Errorf("Error setting endpoint_type: %s", err)
+ return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_perform_action_on_protection_group_run_request", "read", "set-endpoint-type").GetDiag()
+ }
+
d.Set("action", performRunActionResponse.Action)
if !core.IsNil(performRunActionResponse.PauseParams) {
diff --git a/ibm/service/backuprecovery/resource_ibm_backup_recovery_perform_action_on_protection_group_run_request_test.go b/ibm/service/backuprecovery/resource_ibm_backup_recovery_perform_action_on_protection_group_run_request_test.go
index 4212c21777..817710f097 100644
--- a/ibm/service/backuprecovery/resource_ibm_backup_recovery_perform_action_on_protection_group_run_request_test.go
+++ b/ibm/service/backuprecovery/resource_ibm_backup_recovery_perform_action_on_protection_group_run_request_test.go
@@ -17,9 +17,9 @@ import (
)
func TestAccIbmBackupRecoveryPerformActionOnProtectionGroupRunRequestBasic(t *testing.T) {
- objectId := 18
+ objectId := 344
runType := "kRegular"
- groupName := "terra-test-group-4" //"tf-group-5"
+ groupName := "tetst-terra-group-2" //"tf-group-5"
resource.Test(t, resource.TestCase{
PreCheck: func() { acc.TestAccPreCheck(t) },
@@ -50,11 +50,13 @@ func testAccCheckIbmBackupRecoveryProtectionGroupRunRequest(groupName, runType s
data "ibm_backup_recovery_protection_groups" "ibm_backup_recovery_protection_groups_instance" {
x_ibm_tenant_id = "%s"
names = ["%s"]
+
}
resource "ibm_backup_recovery_protection_group_run_request" "baas_protection_group_run_request_instance" {
x_ibm_tenant_id = "%s"
run_type = "%s"
+
group_id = data.ibm_backup_recovery_protection_groups.ibm_backup_recovery_protection_groups_instance.protection_groups.0.id
lifecycle {
ignore_changes = ["x_ibm_tenant_id","run_type","group_id"]
@@ -106,16 +108,19 @@ func testAccCheckIbmBackupRecoveryPerformActionOnProtectionGroupRunRequestConfig
data "ibm_backup_recovery_protection_groups" "ibm_backup_recovery_protection_groups_instance" {
x_ibm_tenant_id = "%s"
+
names = ["%s"]
}
data "ibm_backup_recovery_protection_group_runs" "baas_protection_group_runs_instance" {
x_ibm_tenant_id = "%s"
+
protection_group_id = data.ibm_backup_recovery_protection_groups.ibm_backup_recovery_protection_groups_instance.protection_groups.0.id
}
resource "ibm_backup_recovery_perform_action_on_protection_group_run_request" "baas_perform_action_on_protection_group_run_request_instance" {
x_ibm_tenant_id = "%s"
+
group_id = data.ibm_backup_recovery_protection_group_runs.baas_protection_group_runs_instance.protection_group_id
action = "Cancel"
cancel_params {
diff --git a/ibm/service/backuprecovery/resource_ibm_backup_recovery_protection_group.go b/ibm/service/backuprecovery/resource_ibm_backup_recovery_protection_group.go
index 55818a4e13..bc1ff24848 100644
--- a/ibm/service/backuprecovery/resource_ibm_backup_recovery_protection_group.go
+++ b/ibm/service/backuprecovery/resource_ibm_backup_recovery_protection_group.go
@@ -18,7 +18,6 @@ import (
"github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns"
"github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex"
- "github.com/IBM-Cloud/terraform-provider-ibm/ibm/validate"
"github.com/IBM/go-sdk-core/v5/core"
"github.com/IBM/ibm-backup-recovery-sdk-go/backuprecoveryv1"
)
@@ -5073,36 +5072,6 @@ func ResourceIbmBackupRecoveryProtectionGroup() *schema.Resource {
}
}
-func ResourceIbmBackupRecoveryProtectionGroupValidator() *validate.ResourceValidator {
- validateSchema := make([]validate.ValidateSchema, 0)
- validateSchema = append(validateSchema,
- validate.ValidateSchema{
- Identifier: "priority",
- ValidateFunctionIdentifier: validate.ValidateAllowedStringValue,
- Type: validate.TypeString,
- Optional: true,
- AllowedValues: "kHigh, kLow, kMedium",
- },
- validate.ValidateSchema{
- Identifier: "qos_policy",
- ValidateFunctionIdentifier: validate.ValidateAllowedStringValue,
- Type: validate.TypeString,
- Optional: true,
- AllowedValues: "kBackupAll, kBackupHDD, kBackupSSD, kTestAndDevHigh",
- },
- validate.ValidateSchema{
- Identifier: "environment",
- ValidateFunctionIdentifier: validate.ValidateAllowedStringValue,
- Type: validate.TypeString,
- Required: true,
- AllowedValues: "kPhysical, kSQL",
- },
- )
-
- resourceValidator := validate.ResourceValidator{ResourceName: "ibm_backup_recovery_protection_group", Schema: validateSchema}
- return &resourceValidator
-}
-
func resourceIbmBackupRecoveryProtectionGroupCreate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
backupRecoveryClient, err := meta.(conns.ClientSession).BackupRecoveryV1()
if err != nil {
@@ -5111,6 +5080,18 @@ func resourceIbmBackupRecoveryProtectionGroupCreate(context context.Context, d *
return tfErr.GetDiag()
}
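+ // Build an instance-scoped client when instance_id and region are set, so the protection group is created against that instance's endpoint.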
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
+
createProtectionGroupOptions := &backuprecoveryv1.CreateProtectionGroupOptions{}
tenantId := d.Get("x_ibm_tenant_id").(string)
createProtectionGroupOptions.SetXIBMTenantID(tenantId)
@@ -5215,6 +5196,18 @@ func resourceIbmBackupRecoveryProtectionGroupRead(context context.Context, d *sc
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "read")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
+
getProtectionGroupByIdOptions := &backuprecoveryv1.GetProtectionGroupByIdOptions{}
tenantId := d.Get("x_ibm_tenant_id").(string)
@@ -5237,6 +5230,21 @@ func resourceIbmBackupRecoveryProtectionGroupRead(context context.Context, d *sc
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ if instanceId != "" {
+ if err := d.Set("instance_id", instanceId); err != nil {
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting instance_id: %s", err), "(Resource) ibm_backup_recovery_protection_group", "read", "set-backup-recovery-instance-id").GetDiag()
+ }
+ }
+ if region != "" {
+ if err := d.Set("region", region); err != nil {
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting region: %s", err), "(Resource) ibm_backup_recovery_protection_group", "read", "set-backup-recovery-region").GetDiag()
+ }
+ }
+
+ if err = d.Set("endpoint_type", d.Get("endpoint_type").(string)); err != nil {
+ err = fmt.Errorf("Error setting endpoint_type: %s", err)
+ return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_backup_recovery_protection_group", "read", "set-endpoint-type").GetDiag()
+ }
if err = d.Set("name", protectionGroupResponse.Name); err != nil {
err = fmt.Errorf("Error setting name: %s", err)
@@ -5488,6 +5496,17 @@ func resourceIbmBackupRecoveryProtectionGroupUpdate(context context.Context, d *
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "update")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
updateProtectionGroupOptions := &backuprecoveryv1.UpdateProtectionGroupOptions{}
@@ -5598,6 +5617,17 @@ func resourceIbmBackupRecoveryProtectionGroupDelete(context context.Context, d *
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "delete")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
deleteProtectionGroupOptions := &backuprecoveryv1.DeleteProtectionGroupOptions{}
diff --git a/ibm/service/backuprecovery/resource_ibm_backup_recovery_protection_group_run_request.go b/ibm/service/backuprecovery/resource_ibm_backup_recovery_protection_group_run_request.go
index b346e52bed..431e0b7c51 100644
--- a/ibm/service/backuprecovery/resource_ibm_backup_recovery_protection_group_run_request.go
+++ b/ibm/service/backuprecovery/resource_ibm_backup_recovery_protection_group_run_request.go
@@ -17,7 +17,6 @@ import (
"github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns"
"github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex"
- "github.com/IBM-Cloud/terraform-provider-ibm/ibm/validate"
"github.com/IBM/go-sdk-core/v5/core"
"github.com/IBM/ibm-backup-recovery-sdk-go/backuprecoveryv1"
)
@@ -421,28 +420,6 @@ func checkDiffResourceIbmBackupRecoveryProtectionGroupRun(context context.Contex
return nil
}
-func ResourceIbmBackupRecoveryProtectionGroupRunRequestValidator() *validate.ResourceValidator {
- validateSchema := make([]validate.ValidateSchema, 0)
- validateSchema = append(validateSchema,
- validate.ValidateSchema{
- Identifier: "run_type",
- ValidateFunctionIdentifier: validate.ValidateAllowedStringValue,
- Type: validate.TypeString,
- Required: true,
- AllowedValues: "kFull, kHydrateCDP, kLog, kRegular, kStorageArraySnapshot, kSystem",
- },
- validate.ValidateSchema{
- Identifier: "group_id",
- ValidateFunctionIdentifier: validate.ValidateAllowedStringValue,
- Type: validate.TypeString,
- Required: true,
- },
- )
-
- resourceValidator := validate.ResourceValidator{ResourceName: "ibm_backup_recovery_protection_group_run_request", Schema: validateSchema}
- return &resourceValidator
-}
-
func resourceIbmBackupRecoveryProtectionGroupRunRequestCreate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
backupRecoveryClient, err := meta.(conns.ClientSession).BackupRecoveryV1()
if err != nil {
@@ -450,6 +427,17 @@ func resourceIbmBackupRecoveryProtectionGroupRunRequestCreate(context context.Co
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
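+ // Target the instance-specific endpoint for the run request when instance_id and region are provided.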
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
createProtectionGroupRunOptions := &backuprecoveryv1.CreateProtectionGroupRunOptions{}
@@ -484,6 +472,22 @@ func resourceIbmBackupRecoveryProtectionGroupRunRequestCreate(context context.Co
return tfErr.GetDiag()
}
+ if instanceId != "" {
+ if err := d.Set("instance_id", instanceId); err != nil {
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting instance_id: %s", err), "(Resource) ibm_backup_recovery_protection_group_run_request", "read", "set-instance-id").GetDiag()
+ }
+ }
+ if region != "" {
+ if err := d.Set("region", region); err != nil {
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting region: %s", err), "(Resource) ibm_backup_recovery_protection_group_run_request", "read", "set-region").GetDiag()
+ }
+ }
+
+ if err = d.Set("endpoint_type", d.Get("endpoint_type").(string)); err != nil {
+ err = fmt.Errorf("Error setting endpoint_type: %s", err)
+ return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_backup_recovery_protection_group_run_request", "read", "set-endpoint-type").GetDiag()
+ }
+
d.SetId(*createProtectionGroupRunResponse.ProtectionGroupID)
if err = d.Set("group_id", *createProtectionGroupRunResponse.ProtectionGroupID); err != nil {
return diag.FromErr(fmt.Errorf("Error setting group_id: %s", err))
diff --git a/ibm/service/backuprecovery/resource_ibm_backup_recovery_protection_group_run_request_test.go b/ibm/service/backuprecovery/resource_ibm_backup_recovery_protection_group_run_request_test.go
index 8aa3b190e3..b68ef62e19 100644
--- a/ibm/service/backuprecovery/resource_ibm_backup_recovery_protection_group_run_request_test.go
+++ b/ibm/service/backuprecovery/resource_ibm_backup_recovery_protection_group_run_request_test.go
@@ -17,9 +17,9 @@ import (
)
func TestAccIbmBackupRecoveryProtectionGroupRunRequestBasic(t *testing.T) {
- objectId := 18
+ objectId := 344
runType := "kRegular"
- groupName := "terra-test-group-4" //"tf-group-5"
+ groupName := "tetst-terra-group-2" //"tf-group-5"
resource.Test(t, resource.TestCase{
PreCheck: func() { acc.TestAccPreCheck(t) },
@@ -49,6 +49,7 @@ func testAccCreateIbmBaasProtectionGroupRunRequest(groupName, runType string, ob
return fmt.Sprintf(`
data "ibm_backup_recovery_protection_groups" "ibm_backup_recovery_protection_groups_instance" {
+
x_ibm_tenant_id = "%s"
names = ["%s"]
}
@@ -56,6 +57,7 @@ func testAccCreateIbmBaasProtectionGroupRunRequest(groupName, runType string, ob
resource "ibm_backup_recovery_protection_group_run_request" "baas_protection_group_run_request_instance" {
x_ibm_tenant_id = "%s"
run_type = "%s"
+
group_id = data.ibm_backup_recovery_protection_groups.ibm_backup_recovery_protection_groups_instance.protection_groups.0.id
lifecycle {
ignore_changes = ["x_ibm_tenant_id","run_type","group_id"]
@@ -106,16 +108,19 @@ func testAccCreateIbmBaasProtectionGroupRunCancelRequestConfigBasic(runType, gro
return fmt.Sprintf(`
data "ibm_backup_recovery_protection_groups" "baas_protection_group_existing_instance" {
x_ibm_tenant_id = "%[1]s"
+
names = ["%[2]s"]
}
data "ibm_backup_recovery_protection_group_runs" "example_runs" {
+
x_ibm_tenant_id = "%[1]s"
protection_group_id = data.ibm_backup_recovery_protection_groups.baas_protection_group_existing_instance.protection_groups.0.id
}
resource "ibm_backup_recovery_perform_action_on_protection_group_run_request" "baas_perform_action_on_protection_group_run_request_instance" {
x_ibm_tenant_id = "%[1]s"
+
group_id = data.ibm_backup_recovery_protection_groups.baas_protection_group_existing_instance.protection_groups.0.id
action = "Cancel"
cancel_params {
diff --git a/ibm/service/backuprecovery/resource_ibm_backup_recovery_protection_group_test.go b/ibm/service/backuprecovery/resource_ibm_backup_recovery_protection_group_test.go
index 1d34791672..817a678a2d 100644
--- a/ibm/service/backuprecovery/resource_ibm_backup_recovery_protection_group_test.go
+++ b/ibm/service/backuprecovery/resource_ibm_backup_recovery_protection_group_test.go
@@ -25,7 +25,7 @@ func TestAccIbmBackupRecoveryProtectionGroupBasic(t *testing.T) {
includedPath := "/data2/data/"
includedPathUpdate := "/data1/"
protectionType := "kFile"
- objectId := 18
+ objectId := 344
resource.Test(t, resource.TestCase{
PreCheck: func() { acc.TestAccPreCheck(t) },
@@ -83,6 +83,7 @@ func testAccCheckIbmBackupRecoveryProtectionGroupConfigBasic(name, environment,
resource "ibm_backup_recovery_protection_group" "baas_protection_group_instance" {
x_ibm_tenant_id = "%s"
+
policy_id = ibm_backup_recovery_protection_policy.baas_protection_policy_instance.policy_id
name = "%s"
environment = "%s"
diff --git a/ibm/service/backuprecovery/resource_ibm_backup_recovery_protection_policy.go b/ibm/service/backuprecovery/resource_ibm_backup_recovery_protection_policy.go
index 7a3e8d19fa..9dedc2e690 100644
--- a/ibm/service/backuprecovery/resource_ibm_backup_recovery_protection_policy.go
+++ b/ibm/service/backuprecovery/resource_ibm_backup_recovery_protection_policy.go
@@ -18,7 +18,6 @@ import (
"github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns"
"github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex"
- "github.com/IBM-Cloud/terraform-provider-ibm/ibm/validate"
"github.com/IBM/go-sdk-core/v5/core"
"github.com/IBM/ibm-backup-recovery-sdk-go/backuprecoveryv1"
)
@@ -4200,22 +4199,6 @@ func ResourceIbmBackupRecoveryProtectionPolicy() *schema.Resource {
}
}
-func ResourceIbmBackupRecoveryProtectionPolicyValidator() *validate.ResourceValidator {
- validateSchema := make([]validate.ValidateSchema, 0)
- validateSchema = append(validateSchema,
- validate.ValidateSchema{
- Identifier: "data_lock",
- ValidateFunctionIdentifier: validate.ValidateAllowedStringValue,
- Type: validate.TypeString,
- Optional: true,
- AllowedValues: "Administrative, Compliance",
- },
- )
-
- resourceValidator := validate.ResourceValidator{ResourceName: "ibm_backup_recovery_protection_policy", Schema: validateSchema}
- return &resourceValidator
-}
-
func resourceIbmBackupRecoveryProtectionPolicyCreate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
backupRecoveryClient, err := meta.(conns.ClientSession).BackupRecoveryV1()
if err != nil {
@@ -4223,6 +4206,17 @@ func resourceIbmBackupRecoveryProtectionPolicyCreate(context context.Context, d
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
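+ // Point the client at the instance-specific endpoint (per endpoint_type) when instance_id and region are set.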
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
tenantId := d.Get("x_ibm_tenant_id").(string)
@@ -4329,7 +4323,17 @@ func resourceIbmBackupRecoveryProtectionPolicyRead(context context.Context, d *s
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
-
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "read")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
getProtectionPolicyByIdOptions := &backuprecoveryv1.GetProtectionPolicyByIdOptions{}
getProtectionPolicyByIdOptions.SetID(policyId)
@@ -4346,6 +4350,22 @@ func resourceIbmBackupRecoveryProtectionPolicyRead(context context.Context, d *s
return tfErr.GetDiag()
}
+ if instanceId != "" {
+ if err := d.Set("instance_id", instanceId); err != nil {
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting instance_id: %s", err), "(Resource) ibm_backup_recovery_protection_policy", "read", "set-instance-id").GetDiag()
+ }
+ }
+ if region != "" {
+ if err := d.Set("region", region); err != nil {
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting region: %s", err), "(Resource) ibm_backup_recovery_protection_policy", "read", "set-region").GetDiag()
+ }
+ }
+
+ if err = d.Set("endpoint_type", d.Get("endpoint_type").(string)); err != nil {
+ err = fmt.Errorf("Error setting endpoint_type: %s", err)
+ return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_backup_recovery_protection_policy", "read", "set-endpoint-type").GetDiag()
+ }
+
if err = d.Set("x_ibm_tenant_id", tenantId); err != nil {
err = fmt.Errorf("Error setting x_ibm_tenant_id: %s", err)
return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_backup_recovery_protection_policy", "read", "set-x_ibm_tenant_id").GetDiag()
@@ -4502,6 +4522,17 @@ func resourceIbmBackupRecoveryProtectionPolicyUpdate(context context.Context, d
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "update")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
tenantId := d.Get("x_ibm_tenant_id").(string)
policyId := d.Id()
@@ -4606,6 +4637,17 @@ func resourceIbmBackupRecoveryProtectionPolicyDelete(context context.Context, d
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "delete")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
tenantId := d.Get("x_ibm_tenant_id").(string)
policyId := d.Id()
diff --git a/ibm/service/backuprecovery/resource_ibm_backup_recovery_protection_policy_test.go b/ibm/service/backuprecovery/resource_ibm_backup_recovery_protection_policy_test.go
index 53533d2c63..838c4cdbc1 100644
--- a/ibm/service/backuprecovery/resource_ibm_backup_recovery_protection_policy_test.go
+++ b/ibm/service/backuprecovery/resource_ibm_backup_recovery_protection_policy_test.go
@@ -52,6 +52,7 @@ func testAccCheckIbmBackupRecoveryProtectionPolicyConfigBasic(name string, durat
resource "ibm_backup_recovery_protection_policy" "baas_protection_policy_instance" {
x_ibm_tenant_id = "%s"
name = "%s"
+
backup_policy {
regular {
incremental{
diff --git a/ibm/service/backuprecovery/resource_ibm_backup_recovery_restore_points.go b/ibm/service/backuprecovery/resource_ibm_backup_recovery_restore_points.go
index 8b2d93b8ea..f2f2d92d30 100644
--- a/ibm/service/backuprecovery/resource_ibm_backup_recovery_restore_points.go
+++ b/ibm/service/backuprecovery/resource_ibm_backup_recovery_restore_points.go
@@ -18,7 +18,6 @@ import (
"github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns"
"github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex"
- "github.com/IBM-Cloud/terraform-provider-ibm/ibm/validate"
"github.com/IBM/go-sdk-core/v5/core"
"github.com/IBM/ibm-backup-recovery-sdk-go/backuprecoveryv1"
)
@@ -3365,22 +3364,6 @@ func checkDiffResourceIbmBackupRecoveryRestorePoints(context context.Context, d
return nil
}
-func ResourceIbmBackupRecoveryRestorePointsValidator() *validate.ResourceValidator {
- validateSchema := make([]validate.ValidateSchema, 0)
- validateSchema = append(validateSchema,
- validate.ValidateSchema{
- Identifier: "environment",
- ValidateFunctionIdentifier: validate.ValidateAllowedStringValue,
- Type: validate.TypeString,
- Required: true,
- AllowedValues: "kAcropolis, kAD, kAWS, kAzure, kCassandra, kCouchbase, kElastifile, kExchange, kFlashBlade, kGCP, kGenericNas, kGPFS, kHBase, kHdfs, kHive, kHyperV, kIbmFlashSystem, kIsilon, kKubernetes, kKVM, kMongoDB, kNetapp, kO365, kOracle, kPhysical, kPure, kRemoteAdapter, kSAPHANA, kSfdc, kSQL, kUDA, kView, kVMware",
- },
- )
-
- resourceValidator := validate.ResourceValidator{ResourceName: "ibm_backup_recovery_restore_points", Schema: validateSchema}
- return &resourceValidator
-}
-
func resourceIbmBackupRecoveryRestorePointsCreate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
backupRecoveryClient, err := meta.(conns.ClientSession).BackupRecoveryV1()
if err != nil {
@@ -3388,6 +3371,17 @@ func resourceIbmBackupRecoveryRestorePointsCreate(context context.Context, d *sc
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
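+ // Resolve restore points against the instance-specific endpoint when instance_id and region are supplied.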
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
getRestorePointsInTimeRangeOptions := &backuprecoveryv1.GetRestorePointsInTimeRangeOptions{}
@@ -3417,6 +3411,22 @@ func resourceIbmBackupRecoveryRestorePointsCreate(context context.Context, d *sc
d.SetId(resourceIbmBackupRecoveryRestorePointsID(d))
+ if instanceId != "" {
+ if err := d.Set("instance_id", instanceId); err != nil {
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting instance_id: %s", err), "(Resource) ibm_restore_points", "read", "set-instance-id").GetDiag()
+ }
+ }
+ if region != "" {
+ if err := d.Set("region", region); err != nil {
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting region: %s", err), "(Resource) ibm_restore_points", "read", "set-region").GetDiag()
+ }
+ }
+
+ if err = d.Set("endpoint_type", d.Get("endpoint_type").(string)); err != nil {
+ err = fmt.Errorf("Error setting endpoint_type: %s", err)
+ return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_restore_points", "read", "set-endpoint-type").GetDiag()
+ }
+
if !core.IsNil(getRestorePointsInTimeRangeResponse.FullSnapshotInfo) {
fullSnapshotInfo := []map[string]interface{}{}
for _, fullSnapshotInfoItem := range getRestorePointsInTimeRangeResponse.FullSnapshotInfo {
diff --git a/ibm/service/backuprecovery/resource_ibm_backup_recovery_source_registration.go b/ibm/service/backuprecovery/resource_ibm_backup_recovery_source_registration.go
index 28e9ebcc3d..b9c9d1f251 100644
--- a/ibm/service/backuprecovery/resource_ibm_backup_recovery_source_registration.go
+++ b/ibm/service/backuprecovery/resource_ibm_backup_recovery_source_registration.go
@@ -19,7 +19,6 @@ import (
"github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns"
"github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex"
- "github.com/IBM-Cloud/terraform-provider-ibm/ibm/validate"
"github.com/IBM/go-sdk-core/v5/core"
"github.com/IBM/ibm-backup-recovery-sdk-go/backuprecoveryv1"
)
@@ -1066,22 +1065,6 @@ func ResourceIbmBackupRecoverySourceRegistration() *schema.Resource {
}
}
-func ResourceIbmBackupRecoverySourceRegistrationValidator() *validate.ResourceValidator {
- validateSchema := make([]validate.ValidateSchema, 0)
- validateSchema = append(validateSchema,
- validate.ValidateSchema{
- Identifier: "environment",
- ValidateFunctionIdentifier: validate.ValidateAllowedStringValue,
- Type: validate.TypeString,
- Required: true,
- AllowedValues: "kPhysical, kSQL",
- },
- )
-
- resourceValidator := validate.ResourceValidator{ResourceName: "ibm_backup_recovery_source_registration", Schema: validateSchema}
- return &resourceValidator
-}
-
func resourceIbmBackupRecoverySourceRegistrationCreate(context context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
backupRecoveryClient, err := meta.(conns.ClientSession).BackupRecoveryV1()
if err != nil {
@@ -1089,6 +1072,17 @@ func resourceIbmBackupRecoverySourceRegistrationCreate(context context.Context,
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
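+ // Register the source through the instance-specific endpoint when instance_id and region are set.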
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, fmt.Sprintf("unable to get clientSession"), "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
registerProtectionSourceOptions := &backuprecoveryv1.RegisterProtectionSourceOptions{}
@@ -1175,6 +1169,17 @@ func resourceIbmBackupRecoverySourceRegistrationRead(context context.Context, d
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, "unable to get clientSession", "ibm_backup_recovery", "read")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
getProtectionSourceRegistrationOptions := &backuprecoveryv1.GetProtectionSourceRegistrationOptions{}
id, err := strconv.Atoi(registrationId)
@@ -1196,6 +1201,22 @@ func resourceIbmBackupRecoverySourceRegistrationRead(context context.Context, d
return tfErr.GetDiag()
}
+ if instanceId != "" {
+ if err := d.Set("instance_id", instanceId); err != nil {
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting instance_id: %s", err), "(Resource) ibm_backup_recovery_source_registration", "read", "set-instance-id").GetDiag()
+ }
+ }
+ if region != "" {
+ if err := d.Set("region", region); err != nil {
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting region: %s", err), "(Resource) ibm_backup_recovery_source_registration", "read", "set-region").GetDiag()
+ }
+ }
+
+ if err = d.Set("endpoint_type", d.Get("endpoint_type").(string)); err != nil {
+ err = fmt.Errorf("Error setting endpoint_type: %s", err)
+ return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_backup_recovery_source_registration", "read", "set-endpoint-type").GetDiag()
+ }
+
if err = d.Set("environment", sourceRegistrationReponseParams.Environment); err != nil {
err = fmt.Errorf("Error setting environment: %s", err)
return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_backup_recovery_source_registration", "read", "set-environment").GetDiag()
@@ -1334,6 +1355,18 @@ func resourceIbmBackupRecoverySourceRegistrationUpdate(context context.Context,
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, "unable to get clientSession", "ibm_backup_recovery", "update")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
+
tenantId := d.Get("x_ibm_tenant_id").(string)
registrationId := d.Id()
if strings.Contains(d.Id(), "::") {
@@ -1482,6 +1515,17 @@ func resourceIbmBackupRecoverySourceRegistrationDelete(context context.Context,
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, "unable to get clientSession", "ibm_backup_recovery", "delete")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
deleteProtectionSourceRegistrationOptions := &backuprecoveryv1.DeleteProtectionSourceRegistrationOptions{}
diff --git a/ibm/service/backuprecovery/resource_ibm_backup_recovery_source_registration_test.go b/ibm/service/backuprecovery/resource_ibm_backup_recovery_source_registration_test.go
index 61e2dbd08c..43f44012ac 100644
--- a/ibm/service/backuprecovery/resource_ibm_backup_recovery_source_registration_test.go
+++ b/ibm/service/backuprecovery/resource_ibm_backup_recovery_source_registration_test.go
@@ -17,15 +17,15 @@ import (
)
var (
- tenantIdRegister = "jhxqx715r9/"
+ tenantIdRegister = "wkk1yqrdce/"
)
func TestAccIbmBackupRecoverySourceRegistrationBasic(t *testing.T) {
var conf backuprecoveryv1.SourceRegistrationResponseParams
environment := "kPhysical"
- connectionId := "6456884682673709176"
- endpoint := "172.26.1.21"
+ connectionId := "5128356219792164864"
+ endpoint := "172.26.202.5"
hostType := "kLinux"
physicalType := "kHost"
applications := ""
@@ -70,6 +70,7 @@ func TestAccIbmBackupRecoverySourceRegistrationBasic(t *testing.T) {
func testAccCheckIbmBackupRecoverySourceRegistrationConfigBasic(environment, applications, endpoint, hostType, physicalType string, connectionId string) string {
return fmt.Sprintf(`
resource "ibm_backup_recovery_source_registration" "baas_source_registration_instance" {
+
x_ibm_tenant_id = "%s"
environment = "%s"
connection_id = "%s"
diff --git a/ibm/service/backuprecovery/resource_ibm_backup_recovery_test.go b/ibm/service/backuprecovery/resource_ibm_backup_recovery_test.go
index 65cc17a6c7..c6bd599f2e 100644
--- a/ibm/service/backuprecovery/resource_ibm_backup_recovery_test.go
+++ b/ibm/service/backuprecovery/resource_ibm_backup_recovery_test.go
@@ -19,9 +19,9 @@ import (
func TestAccIbmBackupRecoveryBasic(t *testing.T) {
name := fmt.Sprintf("tf_recovery_name_%d", acctest.RandIntRange(10, 100))
snapshotEnvironment := "kPhysical"
- objectId := 18
+ objectId := 344
targetenvironment := "kPhysical"
- absolutePath := "/data/"
+ absolutePath := "/mnt"
restoreEntityType := "kRegular"
recoveryAction := "RecoverFiles"
@@ -47,11 +47,13 @@ func testAccCheckIbmBackupRecoveryConfigBasic(objectId int, name, snapshotEnviro
data "ibm_backup_recovery_object_snapshots" "object_snapshot" {
x_ibm_tenant_id = "%s"
+
object_id = %d
}
resource "ibm_backup_recovery" "baas_recovery_instance" {
x_ibm_tenant_id = "%s"
+
snapshot_environment = "%s"
name = "%s"
physical_params {
diff --git a/ibm/service/backuprecovery/resource_ibm_backup_recovery_update_protection_group_run_request.go b/ibm/service/backuprecovery/resource_ibm_backup_recovery_update_protection_group_run_request.go
index 8e6b059788..a3eb36cfeb 100644
--- a/ibm/service/backuprecovery/resource_ibm_backup_recovery_update_protection_group_run_request.go
+++ b/ibm/service/backuprecovery/resource_ibm_backup_recovery_update_protection_group_run_request.go
@@ -400,6 +400,17 @@ func resourceIbmBackupRecoveryUpdateProtectionGroupRunRequestCreate(context cont
log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
return tfErr.GetDiag()
}
+ endpointType := d.Get("endpoint_type").(string)
+ instanceId, region := getInstanceIdAndRegion(d)
+ if instanceId != "" && region != "" {
+ bmxsession, err := meta.(conns.ClientSession).BluemixSession()
+ if err != nil {
+ tfErr := flex.TerraformErrorf(err, "unable to get clientSession", "ibm_backup_recovery", "create")
+ log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
+ return tfErr.GetDiag()
+ }
+ backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
+ }
updateProtectionGroupRunOptions := &backuprecoveryv1.UpdateProtectionGroupRunOptions{}
@@ -424,6 +435,22 @@ func resourceIbmBackupRecoveryUpdateProtectionGroupRunRequestCreate(context cont
return tfErr.GetDiag()
}
+ if instanceId != "" {
+ if err := d.Set("instance_id", instanceId); err != nil {
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting instance_id: %s", err), "(Resource) ibm_backup_recovery_update_protection_group_run_request", "read", "set-instance-id").GetDiag()
+ }
+ }
+ if region != "" {
+ if err := d.Set("region", region); err != nil {
+ return flex.DiscriminatedTerraformErrorf(err, fmt.Sprintf("Error setting region: %s", err), "(Resource) ibm_backup_recovery_update_protection_group_run_request", "read", "set-region").GetDiag()
+ }
+ }
+
+ if err = d.Set("endpoint_type", d.Get("endpoint_type").(string)); err != nil {
+ err = fmt.Errorf("Error setting endpoint_type: %s", err)
+ return flex.DiscriminatedTerraformErrorf(err, err.Error(), "ibm_backup_recovery_update_protection_group_run_request", "read", "set-endpoint-type").GetDiag()
+ }
+
d.Set("successful_run_ids", strings.Join(updateProtectionGroupRunResponse.SuccessfulRunIds[:], ","))
if !core.IsNil(updateProtectionGroupRunResponse.FailedRuns) {
diff --git a/ibm/service/backuprecovery/resource_ibm_backup_recovery_update_protection_group_run_request_test.go b/ibm/service/backuprecovery/resource_ibm_backup_recovery_update_protection_group_run_request_test.go
index 3475e508d7..bca339c6ed 100644
--- a/ibm/service/backuprecovery/resource_ibm_backup_recovery_update_protection_group_run_request_test.go
+++ b/ibm/service/backuprecovery/resource_ibm_backup_recovery_update_protection_group_run_request_test.go
@@ -17,9 +17,9 @@ import (
)
func TestAccIbmBackupRecoveryUpdateProtectionGroupRunRequestBasic(t *testing.T) {
- objectId := 18
+ objectId := 344
runType := "kRegular"
- groupName := "terra-test-group-4" // or can use "tf-group-5" //id: 5901263190628181:1725393921826:9414
+ groupName := "tetst-terra-group-2" // or can use "tf-group-5" //id: 5901263190628181:1725393921826:9414
resource.Test(t, resource.TestCase{
PreCheck: func() { acc.TestAccPreCheck(t) },
Providers: acc.TestAccProviders,
@@ -91,10 +91,12 @@ func testAccCreateIbmBaasProtectionGroupForUpdateRunRequest(groupName, runType s
data "ibm_backup_recovery_protection_groups" "ibm_backup_recovery_protection_groups_instance" {
x_ibm_tenant_id = "%s"
+
names = ["%s"]
}
resource "ibm_backup_recovery_protection_group_run_request" "baas_protection_group_run_request_instance" {
+
x_ibm_tenant_id = "%s"
run_type = "%s"
group_id = data.ibm_backup_recovery_protection_groups.ibm_backup_recovery_protection_groups_instance.protection_groups.0.id
@@ -109,16 +111,19 @@ func testAccCreateIbmBaasProtectionGroupRunUpdateRequestConfigBasic(runType, gro
return fmt.Sprintf(`
data "ibm_backup_recovery_protection_groups" "baas_protection_group_existing_instance" {
x_ibm_tenant_id = "%[1]s"
+
names = ["%[2]s"]
}
data "ibm_backup_recovery_protection_group_runs" "example_runs" {
x_ibm_tenant_id = "%[1]s"
+
protection_group_id = data.ibm_backup_recovery_protection_groups.baas_protection_group_existing_instance.protection_groups.0.id
}
resource "ibm_backup_recovery_update_protection_group_run_request" "baas_update_protection_group_run_request_instance" {
x_ibm_tenant_id = "%[1]s"
+
group_id = data.ibm_backup_recovery_protection_groups.baas_protection_group_existing_instance.protection_groups.0.id
update_protection_group_run_params {
run_id = data.ibm_backup_recovery_protection_group_runs.example_runs.runs.0.id
@@ -134,11 +139,13 @@ func testAccCreateIbmBaasProtectionGroupRunCancelUpdateRequestConfigBasic(runTyp
return fmt.Sprintf(`
data "ibm_backup_recovery_protection_groups" "baas_protection_group_existing_instance" {
x_ibm_tenant_id = "%[1]s"
+
names = ["%[2]s"]
}
data "ibm_backup_recovery_protection_group_runs" "example_runs" {
x_ibm_tenant_id = "%[1]s"
+
protection_group_id = data.ibm_backup_recovery_protection_groups.baas_protection_group_existing_instance.protection_groups.0.id
}
@@ -146,6 +153,7 @@ func testAccCreateIbmBaasProtectionGroupRunCancelUpdateRequestConfigBasic(runTyp
x_ibm_tenant_id = "%[1]s"
group_id = data.ibm_backup_recovery_protection_groups.baas_protection_group_existing_instance.protection_groups.0.id
action = "Cancel"
+
cancel_params {
run_id = data.ibm_backup_recovery_protection_group_runs.example_runs.runs.0.id
local_task_id = data.ibm_backup_recovery_protection_group_runs.example_runs.runs.0.archival_info.0.archival_target_results.0.archival_task_id
diff --git a/ibm/service/backuprecovery/utils.go b/ibm/service/backuprecovery/utils.go
new file mode 100644
index 0000000000..f0edc9e302
--- /dev/null
+++ b/ibm/service/backuprecovery/utils.go
@@ -0,0 +1,95 @@
+package backuprecovery
+
+import (
+ "fmt"
+ "os"
+ "strings"
+
+ session "github.com/IBM-Cloud/bluemix-go/session"
+ "github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns"
+ "github.com/IBM/ibm-backup-recovery-sdk-go/backuprecoveryv1"
+ "github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
+)
+
+// getInstanceIdAndRegion returns the instance_id and region values configured on the resource, if any.
+func getInstanceIdAndRegion(d *schema.ResourceData) (string, string) {
+
+ var region string
+ var instanceId string
+
+ if _, ok := d.GetOk("instance_id"); ok {
+ instanceId = d.Get("instance_id").(string)
+ }
+
+ if _, ok := d.GetOk("region"); ok {
+ region = d.Get("region").(string)
+ }
+
+ return instanceId, region
+
+}
+
+// getClientWithInstanceEndpoint clones the base Backup Recovery client and points it at the instance-specific API endpoint.
+func getClientWithInstanceEndpoint(originalClient *backuprecoveryv1.BackupRecoveryV1, bmxsession *session.Session, instanceId, region, endpointType string) *backuprecoveryv1.BackupRecoveryV1 {
+ // Build the instance-specific API endpoint. If no instance ID and region are
+ // provided, fall back to the client's default endpoint.
+ if instanceId == "" && region == "" {
+ return originalClient
+ }
+
+ domain := "cloud.ibm.com"
+ serviceName := "backup-recovery"
+
+ endpointsFile := bmxsession.Config.EndpointsFile
+
+ iamUrl := os.Getenv("IBMCLOUD_IAM_API_ENDPOINT")
+ if iamUrl == "" {
+ iamUrl = conns.FileFallBack(endpointsFile, endpointType, "IBMCLOUD_IAM_API_ENDPOINT", region, "https://iam.cloud.ibm.com")
+ }
+
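+ // When the IAM endpoint points at a test environment, target the service's test domain as well.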
+ if strings.Contains(iamUrl, "test") {
+ domain = "test.cloud.ibm.com"
+ }
+
+ var endpoint string
+ if endpointType == "private" {
+ endpoint = fmt.Sprintf("https://%s.private.%s.%s.%s/v2", instanceId, region, serviceName, domain)
+ } else {
+ endpoint = fmt.Sprintf("https://%s.%s.%s.%s/v2", instanceId, region, serviceName, domain)
+ }
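+ // For example, with hypothetical values instanceId "abcd-1234" and region "us-south":
+ //   public:  https://abcd-1234.us-south.backup-recovery.cloud.ibm.com/v2
+ //   private: https://abcd-1234.private.us-south.backup-recovery.cloud.ibm.com/v2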
+
+ // clone the client and set endpoint
+ newClient := &backuprecoveryv1.BackupRecoveryV1{
+ Service: originalClient.Service.Clone(),
+ }
+ newClient.Service.SetServiceURL(endpoint)
+ return newClient
+}
+
+// Add the fields needed for building the instance endpoint to the given schema
+func AddInstanceFields(resource *schema.Resource) *schema.Resource {
+ resource.Schema["instance_id"] = &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ ForceNew: true,
+ Description: "The instnace ID of the Backup Recovery instance.",
+ RequiredWith: []string{"region"},
+ }
+ resource.Schema["region"] = &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Computed: true,
+ ForceNew: true,
+ Description: "The region of the Backup Recovery instance.",
+ RequiredWith: []string{"instance_id"},
+ }
+ resource.Schema["endpoint_type"] = &schema.Schema{
+ Type: schema.TypeString,
+ Optional: true,
+ Description: "public or private.",
+ Default: "public",
+ }
+
+ return resource
+}
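For reference, a minimal sketch (not part of the diff above) of how a resource in this package is expected to wire the new helpers together; the resource name `ibm_backup_recovery_example` and its schema are placeholders, and the CRUD pattern simply mirrors the resource changes shown earlier:

```go
package backuprecovery

import (
	"context"
	"log"

	"github.com/hashicorp/terraform-plugin-sdk/v2/diag"
	"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"

	"github.com/IBM-Cloud/terraform-provider-ibm/ibm/conns"
	"github.com/IBM-Cloud/terraform-provider-ibm/ibm/flex"
)

// Hypothetical resource definition: AddInstanceFields injects the instance_id,
// region, and endpoint_type arguments into the schema.
func resourceIbmBackupRecoveryExample() *schema.Resource {
	return AddInstanceFields(&schema.Resource{
		ReadContext: resourceIbmBackupRecoveryExampleRead,
		Schema: map[string]*schema.Schema{
			"x_ibm_tenant_id": {Type: schema.TypeString, Required: true},
		},
	})
}

func resourceIbmBackupRecoveryExampleRead(ctx context.Context, d *schema.ResourceData, meta interface{}) diag.Diagnostics {
	backupRecoveryClient, err := meta.(conns.ClientSession).BackupRecoveryV1()
	if err != nil {
		tfErr := flex.TerraformErrorf(err, "unable to get clientSession", "ibm_backup_recovery_example", "read")
		log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
		return tfErr.GetDiag()
	}
	// When both instance_id and region are configured, swap in a client that
	// targets the instance-specific endpoint; otherwise keep the default URL.
	endpointType := d.Get("endpoint_type").(string)
	instanceId, region := getInstanceIdAndRegion(d)
	if instanceId != "" && region != "" {
		bmxsession, err := meta.(conns.ClientSession).BluemixSession()
		if err != nil {
			tfErr := flex.TerraformErrorf(err, "unable to get clientSession", "ibm_backup_recovery_example", "read")
			log.Printf("[DEBUG]\n%s", tfErr.GetDebugMessage())
			return tfErr.GetDiag()
		}
		backupRecoveryClient = getClientWithInstanceEndpoint(backupRecoveryClient, bmxsession, instanceId, region, endpointType)
	}
	_ = backupRecoveryClient // API calls against the instance-scoped client would go here
	return nil
}
```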
diff --git a/website/docs/d/backup_recoveries.html.markdown b/website/docs/d/backup_recoveries.html.markdown
index eb335bb2ac..91d6719f05 100644
--- a/website/docs/d/backup_recoveries.html.markdown
+++ b/website/docs/d/backup_recoveries.html.markdown
@@ -56,6 +56,9 @@ Nested schema for **recoveries**:
* Constraints: The value must match regular expression `/^\\d+:\\d+:\\d+$/`.
* `is_multi_stage_restore` - (Boolean) Specifies whether the current recovery operation is a multi-stage restore operation. This is currently used by VMware recoveres for the migration/hot-standby use case.
* `is_parent_recovery` - (Boolean) Specifies whether the current recovery operation has created child recoveries. This is currently used in SQL recovery where multiple child recoveries can be tracked under a common/parent recovery.
+* `endpoint_type` - (Optional, String) Backup Recovery Endpoint type. By default set to "public".
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with region, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with instance_id, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
* `messages` - (List) Specifies messages about the recovery.
* `mssql_params` - (List) Specifies the recovery options specific to Sql environment.
Nested schema for **mssql_params**:
diff --git a/website/docs/d/backup_recovery_agent_upgrade_tasks.html.markdown b/website/docs/d/backup_recovery_agent_upgrade_tasks.html.markdown
index 79f057b8e0..f3ccac94b8 100644
--- a/website/docs/d/backup_recovery_agent_upgrade_tasks.html.markdown
+++ b/website/docs/d/backup_recovery_agent_upgrade_tasks.html.markdown
@@ -23,6 +23,9 @@ data "ibm_backup_recovery_agent_upgrade_tasks" "backup_recovery_agent_upgrade_ta
You can specify the following arguments for this data source.
* `ids` - (Optional, List) Specifies IDs of tasks to be fetched.
+* `endpoint_type` - (Optional, String) Backup Recovery Endpoint type. By default set to "public".
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with region, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with instance_id, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
* `x_ibm_tenant_id` - (Required, String) Specifies the key to be used to encrypt the source credential. If includeSourceCredentials is set to true this key must be specified.
## Attribute Reference
diff --git a/website/docs/d/backup_recovery_connectors_metadata.html.markdown b/website/docs/d/backup_recovery_connectors_metadata.html.markdown
index 5866e2255d..3713f7a1ee 100644
--- a/website/docs/d/backup_recovery_connectors_metadata.html.markdown
+++ b/website/docs/d/backup_recovery_connectors_metadata.html.markdown
@@ -23,6 +23,9 @@ data "ibm_backup_recovery_connectors_metadata" "backup_recovery_connectors_metad
You can specify the following arguments for this data source.
* `x_ibm_tenant_id` - (Required, String) Specifies the key to be used to encrypt the source credential. If includeSourceCredentials is set to true this key must be specified.
+* `endpoint_type` - (Optional, String) Backup Recovery Endpoint type. By default set to "public".
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with region, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with instance_id, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
## Attribute Reference
diff --git a/website/docs/d/backup_recovery_data_source_connections.html.markdown b/website/docs/d/backup_recovery_data_source_connections.html.markdown
index 40212b1592..5598d66aa1 100644
--- a/website/docs/d/backup_recovery_data_source_connections.html.markdown
+++ b/website/docs/d/backup_recovery_data_source_connections.html.markdown
@@ -25,6 +25,9 @@ You can specify the following arguments for this data source.
* `connection_ids` - (Optional, List) Specifies the unique IDs of the connections which are to be fetched.
* `connection_names` - (Optional, List) Specifies the names of the connections which are to be fetched.
* `x_ibm_tenant_id` - (Required, String) Specifies the key to be used to encrypt the source credential. If includeSourceCredentials is set to true this key must be specified.
+* `endpoint_type` - (Optional, String) Backup Recovery Endpoint type. By default set to "public".
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with region, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with instance_id, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
## Attribute Reference
diff --git a/website/docs/d/backup_recovery_data_source_connectors.html.markdown b/website/docs/d/backup_recovery_data_source_connectors.html.markdown
index 73a1b066f1..1db5d40ef8 100644
--- a/website/docs/d/backup_recovery_data_source_connectors.html.markdown
+++ b/website/docs/d/backup_recovery_data_source_connectors.html.markdown
@@ -26,6 +26,9 @@ You can specify the following arguments for this data source.
* `connector_ids` - (Optional, List) Specifies the unique IDs of the connectors which are to be fetched.
* `connector_names` - (Optional, List) Specifies the names of the connectors which are to be fetched.
* `x_ibm_tenant_id` - (Required, String) Specifies the key to be used to encrypt the source credential. If includeSourceCredentials is set to true this key must be specified.
+* `endpoint_type` - (Optional, String) Backup Recovery Endpoint type. By default set to "public".
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with region, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with instance_id, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
## Attribute Reference
diff --git a/website/docs/d/backup_recovery_download_agent.html.markdown b/website/docs/d/backup_recovery_download_agent.html.markdown
index e02aa24807..d75eacaeef 100644
--- a/website/docs/d/backup_recovery_download_agent.html.markdown
+++ b/website/docs/d/backup_recovery_download_agent.html.markdown
@@ -32,6 +32,9 @@ Nested schema for **linux_params**:
* `file_path` - (Required, String) Specifies the absolute path for download.
* Constraints: Allowable values are: `kWindows`, `kLinux`.
* `x_ibm_tenant_id` - (Required, String) Specifies the key to be used to encrypt the source credential. If includeSourceCredentials is set to true this key must be specified.
+* `endpoint_type` - (Optional, String) Backup Recovery Endpoint type. By default set to "public".
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with region, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with instance_id, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
## Attribute Reference
diff --git a/website/docs/d/backup_recovery_download_files.html.markdown b/website/docs/d/backup_recovery_download_files.html.markdown
index d403c51a4b..3213f231a2 100644
--- a/website/docs/d/backup_recovery_download_files.html.markdown
+++ b/website/docs/d/backup_recovery_download_files.html.markdown
@@ -32,6 +32,9 @@ You can specify the following arguments for this data source.
* `start_offset` - (Optional, Integer) Specifies the start offset of file chunk to be downloaded.
* `start_time` - (Optional, String) Specifies the start time of restore task.
* `x_ibm_tenant_id` - (Required, String) Specifies the key to be used to encrypt the source credential. If includeSourceCredentials is set to true this key must be specified.
+* `endpoint_type` - (Optional, String) Backup Recovery Endpoint type. By default set to "public".
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with region, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with instance_id, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
## Attribute Reference
diff --git a/website/docs/d/backup_recovery_download_indexed_files.html.markdown b/website/docs/d/backup_recovery_download_indexed_files.html.markdown
index 8de91070cd..9feb2e8197 100644
--- a/website/docs/d/backup_recovery_download_indexed_files.html.markdown
+++ b/website/docs/d/backup_recovery_download_indexed_files.html.markdown
@@ -30,6 +30,9 @@ You can specify the following arguments for this data source.
* `snapshots_id` - (Required, Forces new resource, String) Specifies the snapshot id to download from.
* `start_offset` - (Optional, Integer) Specifies the start offset of file chunk to be downloaded.
* `x_ibm_tenant_id` - (Required, String) Specifies the key to be used to encrypt the source credential. If includeSourceCredentials is set to true this key must be specified.
+* `endpoint_type` - (Optional, String) Backup Recovery Endpoint type. By default set to "public".
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with region, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with instance_id, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
## Attribute Reference
diff --git a/website/docs/d/backup_recovery_object_snapshots.html.markdown b/website/docs/d/backup_recovery_object_snapshots.html.markdown
index d59647ce2e..f98c6ce307 100644
--- a/website/docs/d/backup_recovery_object_snapshots.html.markdown
+++ b/website/docs/d/backup_recovery_object_snapshots.html.markdown
@@ -25,6 +25,9 @@ You can specify the following arguments for this data source.
* `object_id` - (Required, Forces new resource, Integer) Specifies the id of the Object.
* `from_time_usecs` - (Optional, Integer) Specifies the timestamp in Unix time epoch in microseconds to filter Object's snapshots which were taken after this value.
+* `endpoint_type` - (Optional, String) Backup Recovery Endpoint type. By default set to "public".
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with region, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with instance_id, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
* `object_action_keys` - (Optional, List) Filter by ObjectActionKey, which uniquely represents the protection of an object. An object can be protected in multiple ways but at most once for a given combination of ObjectActionKey. When specified, only snapshots matching the given action keys are returned for the corresponding object.
* Constraints: Allowable list items are: `kVMware`, `kHyperV`, `kVCD`, `kAzure`, `kGCP`, `kKVM`, `kAcropolis`, `kAWS`, `kAWSNative`, `kAwsS3`, `kAWSSnapshotManager`, `kRDSSnapshotManager`, `kAuroraSnapshotManager`, `kAwsRDSPostgresBackup`, `kAwsRDSPostgres`, `kAwsAuroraPostgres`, `kAzureNative`, `kAzureSQL`, `kAzureSnapshotManager`, `kPhysical`, `kPhysicalFiles`, `kGPFS`, `kElastifile`, `kNetapp`, `kGenericNas`, `kIsilon`, `kFlashBlade`, `kPure`, `kIbmFlashSystem`, `kSQL`, `kExchange`, `kAD`, `kOracle`, `kView`, `kRemoteAdapter`, `kO365`, `kO365PublicFolders`, `kO365Teams`, `kO365Group`, `kO365Exchange`, `kO365OneDrive`, `kO365Sharepoint`, `kKubernetes`, `kCassandra`, `kMongoDB`, `kCouchbase`, `kHdfs`, `kHive`, `kHBase`, `kSAPHANA`, `kUDA`, `kSfdc`, `kO365ExchangeCSM`, `kO365OneDriveCSM`, `kO365SharepointCSM`.
* `protection_group_ids` - (Optional, List) If specified, this returns only the snapshots of the specified object ID, which belong to the provided protection group IDs.
diff --git a/website/docs/d/backup_recovery_protection_group.html.markdown b/website/docs/d/backup_recovery_protection_group.html.markdown
index 4e75679f99..23aaeecfed 100644
--- a/website/docs/d/backup_recovery_protection_group.html.markdown
+++ b/website/docs/d/backup_recovery_protection_group.html.markdown
@@ -24,6 +24,9 @@ data "ibm_backup_recovery_protection_group" "backup_recovery_protection_group" {
You can specify the following arguments for this data source.
* `protection_group_id` - (Required, Forces new resource, String) Specifies a unique id of the Protection Group.
+* `endpoint_type` - (Optional, String) Backup Recovery Endpoint type. By default set to "public".
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with region, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with instance_id, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
* `include_last_run_info` - (Optional, Boolean) If true, the response will include last run info. If it is false or not specified, the last run info won't be returned.
* `prune_excluded_source_ids` - (Optional, Boolean) If true, the response will not include the list of excluded source IDs in groups that contain this field. This can be set to true in order to improve performance if excluded source IDs are not needed by the user.
* `prune_source_ids` - (Optional, Boolean) If true, the response will exclude the list of source IDs within the group specified.
diff --git a/website/docs/d/backup_recovery_protection_group_runs.html.markdown b/website/docs/d/backup_recovery_protection_group_runs.html.markdown
index 91e834bd87..c7771fb9f2 100644
--- a/website/docs/d/backup_recovery_protection_group_runs.html.markdown
+++ b/website/docs/d/backup_recovery_protection_group_runs.html.markdown
@@ -24,6 +24,9 @@ data "ibm_backup_recovery_protection_group_runs" "backup_recovery_protection_gro
You can specify the following arguments for this data source.
* `archival_run_status` - (Optional, List) Specifies a list of archival status, runs matching the status will be returned.
'Running' indicates that the run is still running.
'Canceled' indicates that the run has been canceled.
'Canceling' indicates that the run is in the process of being canceled.
'Failed' indicates that the run has failed.
'Missed' indicates that the run was unable to take place at the scheduled time because the previous run was still happening.
'Succeeded' indicates that the run has finished successfully.
'SucceededWithWarning' indicates that the run finished successfully, but there were some warning messages.
'Paused' indicates that the ongoing run has been paused.
'Skipped' indicates that the run was skipped.
* Constraints: Allowable list items are: `Accepted`, `Running`, `Canceled`, `Canceling`, `Failed`, `Missed`, `Succeeded`, `SucceededWithWarning`, `OnHold`, `Finalizing`, `Skipped`, `Paused`.
+* `endpoint_type` - (Optional, String) Backup Recovery Endpoint type. By default set to "public".
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with region, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with instance_id, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
* `protection_group_id` - (Required, Forces new resource, String) Specifies a unique id of the Protection Group.
* Constraints: The value must match regular expression `/^\\d+:\\d+:\\d+$/`.
diff --git a/website/docs/d/backup_recovery_protection_groups.html.markdown b/website/docs/d/backup_recovery_protection_groups.html.markdown
index 6a6f887f13..91a42cfce1 100644
--- a/website/docs/d/backup_recovery_protection_groups.html.markdown
+++ b/website/docs/d/backup_recovery_protection_groups.html.markdown
@@ -25,6 +25,9 @@ You can specify the following arguments for this data source.
* `environments` - (Optional, List) Filter by environment types such as 'kVMware', 'kView', etc. Only Protection Groups protecting the specified environment types are returned.
* Constraints: Allowable list items are: `kPhysical`, `kSQL`.
* `ids` - (Optional, List) Filter by a list of Protection Group ids.
+* `endpoint_type` - (Optional, String) Backup Recovery Endpoint type. By default set to "public".
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with region, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with instance_id, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
* `include_groups_with_datalock_only` - (Optional, Boolean) Whether to only return Protection Groups with a datalock.
* `include_last_run_info` - (Optional, Boolean) If true, the response will include last run info. If it is false or not specified, the last run info won't be returned.
* `is_active` - (Optional, Boolean) Filter by Inactive or Active Protection Groups. If not set, all Inactive and Active Protection Groups are returned. If true, only Active Protection Groups are returned. If false, only Inactive Protection Groups are returned. When you create a Protection Group on a Primary Cluster with a replication schedule, the Cluster creates an Inactive copy of the Protection Group on the Remote Cluster. In addition, when an Active and running Protection Group is deactivated, the Protection Group becomes Inactive.
diff --git a/website/docs/d/backup_recovery_protection_policies.html.markdown b/website/docs/d/backup_recovery_protection_policies.html.markdown
index 1e735e8237..33b0c79ec5 100644
--- a/website/docs/d/backup_recovery_protection_policies.html.markdown
+++ b/website/docs/d/backup_recovery_protection_policies.html.markdown
@@ -23,6 +23,9 @@ data "ibm_backup_recovery_protection_policies" "backup_recovery_protection_polic
You can specify the following arguments for this data source.
* `exclude_linked_policies` - (Optional, Boolean) If excludeLinkedPolicies is set to true then only local policies created on cluster will be returned. The result will exclude all linked policies created from policy templates.
+* `endpoint_type` - (Optional, String) Backup Recovery Endpoint type. By default set to "public".
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with region, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with instance_id, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
* `ids` - (Optional, List) Filter policies by a list of policy ids.
* `include_replicated_policies` - (Optional, Boolean) If includeReplicatedPolicies is set to true, then response will also contain replicated policies. By default, replication policies are not included in the response.
* `include_stats` - (Optional, Boolean) If includeStats is set to true, then response will return number of protection groups and objects. By default, the protection stats are not included in the response.
diff --git a/website/docs/d/backup_recovery_protection_policy.html.markdown b/website/docs/d/backup_recovery_protection_policy.html.markdown
index f9c107bfd9..ee941e4f9e 100644
--- a/website/docs/d/backup_recovery_protection_policy.html.markdown
+++ b/website/docs/d/backup_recovery_protection_policy.html.markdown
@@ -24,6 +24,9 @@ data "ibm_backup_recovery_protection_policy" "backup_recovery_protection_policy"
You can specify the following arguments for this data source.
* `protection_policy_id` - (Required, Forces new resource, String) Specifies a unique id of the Protection Policy to return.
+* `endpoint_type` - (Optional, String) Backup Recovery Endpoint type. By default set to "public".
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with region, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with instance_id, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
* `request_initiator_type` - (Optional, String) Specifies the type of request from UI, which is used for services like magneto to determine the priority of requests.
* Constraints: Allowable values are: `UIUser`, `UIAuto`, `Helios`.
* `x_ibm_tenant_id` - (Required, String) Specifies the key to be used to encrypt the source credential. If includeSourceCredentials is set to true this key must be specified.
diff --git a/website/docs/d/backup_recovery_protection_sources.html.markdown b/website/docs/d/backup_recovery_protection_sources.html.markdown
index f4f792caa0..7f96defd07 100644
--- a/website/docs/d/backup_recovery_protection_sources.html.markdown
+++ b/website/docs/d/backup_recovery_protection_sources.html.markdown
@@ -23,6 +23,9 @@ data "ibm_backup_recovery_protection_sources" "backup_recovery_protection_source
You can specify the following arguments for this data source.
* `after_cursor_entity_id` - (Optional, Integer) Specifies the entity id starting from which the items are to be returned.
+* `endpoint_type` - (Optional, String) Backup Recovery Endpoint type. By default set to "public".
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with region, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with instance_id, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
* `all_under_hierarchy` - (Optional, Boolean) AllUnderHierarchy specifies if objects of all the tenants under the hierarchy of the logged in user's organization should be returned.
* `backup_recovery_protection_sources_id` - (Optional, Integer) Return the Object subtree for the passed in Protection Source id.
* `before_cursor_entity_id` - (Optional, Integer) Specifies the entity id upto which the items are to be returned.
diff --git a/website/docs/d/backup_recovery_search_indexed_object.html.markdown b/website/docs/d/backup_recovery_search_indexed_object.html.markdown
index 2d82d4033c..a53219a0e0 100644
--- a/website/docs/d/backup_recovery_search_indexed_object.html.markdown
+++ b/website/docs/d/backup_recovery_search_indexed_object.html.markdown
@@ -241,6 +241,9 @@ Nested schema for **couchbase_params**:
* `search_string` - (Required, String) Specifies the search string to search the Couchbase Objects.
* `source_ids` - (Optional, List) Specifies a list of source ids. Only files found in these sources will be returned.
* `count` - (Optional, Forces new resource, Integer) Specifies the number of indexed objects to be fetched for the specified pagination cookie.
+* `endpoint_type` - (Optional, String) Backup Recovery Endpoint type. By default set to "public".
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with region, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with instance_id, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
* `email_params` - (Optional, Forces new resource, List) Specifies the request parameters to search for emails and email folders.
Nested schema for **email_params**:
* `attendees_addresses` - (Optional, List) Filters the calendar items which have specified email addresses as attendees.
diff --git a/website/docs/d/backup_recovery_search_objects.html.markdown b/website/docs/d/backup_recovery_search_objects.html.markdown
index ed8d75e283..e278dc5822 100644
--- a/website/docs/d/backup_recovery_search_objects.html.markdown
+++ b/website/docs/d/backup_recovery_search_objects.html.markdown
@@ -24,6 +24,9 @@ You can specify the following arguments for this data source.
* `cluster_identifiers` - (Optional, List) Specifies the list of cluster identifiers. Format is clusterId:clusterIncarnationId. Only records from clusters having these identifiers will be returned.
* `count` - (Optional, Integer) Specifies the number of objects to be fetched for the specified pagination cookie.
+* `endpoint_type` - (Optional, String) Backup Recovery Endpoint type. By default set to "public".
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with region, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with instance_id, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
* `environments` - (Optional, List) Specifies the environment type to filter objects.
* Constraints: Allowable list items are: `kPhysical`, `kSQL`.
* `external_filters` - (Optional, List) Specifies the key-value pairs to filtering the results for the search. Each filter is of the form 'key:value'. The filter 'externalFilters:k1:v1&externalFilters:k2:v2&externalFilters:k2:v3' returns the documents where each document will match the query (k1=v1) AND (k2=v2 OR k2 = v3). Allowed keys: - vmBiosUuid - graphUuid - arn - instanceId - bucketName - azureId.
diff --git a/website/docs/d/backup_recovery_search_protected_objects.html.markdown b/website/docs/d/backup_recovery_search_protected_objects.html.markdown
index 200b59d8e7..48ac6ba6e7 100644
--- a/website/docs/d/backup_recovery_search_protected_objects.html.markdown
+++ b/website/docs/d/backup_recovery_search_protected_objects.html.markdown
@@ -23,6 +23,9 @@ data "ibm_backup_recovery_search_protected_objects" "backup_recovery_search_prot
You can specify the following arguments for this data source.
* `cdp_protected_only` - (Optional, Boolean) Specifies whether to only return the CDP protected objects.
+* `endpoint_type` - (Optional, String) Backup Recovery Endpoint type. By default set to "public".
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with region, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with instance_id, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
* `environments` - (Optional, List) Specifies the environment type to filter objects.
* Constraints: Allowable list items are: `kPhysical`, `kSQL`.
* `filter_snapshot_from_usecs` - (Optional, Integer) Specifies the timestamp in Unix time epoch in microseconds to filter the objects if the Object has a successful snapshot after this value.
diff --git a/website/docs/d/backup_recovery_source_registration.html.markdown b/website/docs/d/backup_recovery_source_registration.html.markdown
index 1816d0cdb3..151715eef0 100644
--- a/website/docs/d/backup_recovery_source_registration.html.markdown
+++ b/website/docs/d/backup_recovery_source_registration.html.markdown
@@ -24,6 +24,9 @@ data "ibm_backup_recovery_source_registration" "backup_recovery_source_registrat
You can specify the following arguments for this data source.
* `source_registration_id` - (Required, Forces new resource, Integer) Specifies the id of the Protection Source registration.
+* `endpoint_type` - (Optional, String) Backup Recovery Endpoint type. By default set to "public".
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with region, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with instance_id, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
* `request_initiator_type` - (Optional, String) Specifies the type of request from UI, which is used for services like magneto to determine the priority of requests.
* Constraints: Allowable values are: `UIUser`, `UIAuto`, `Helios`.
* `x_ibm_tenant_id` - (Required, String) Specifies the key to be used to encrypt the source credential. If includeSourceCredentials is set to true this key must be specified.
diff --git a/website/docs/d/backup_recovery_source_registrations.html.markdown b/website/docs/d/backup_recovery_source_registrations.html.markdown
index 4d913882b7..71c1410783 100644
--- a/website/docs/d/backup_recovery_source_registrations.html.markdown
+++ b/website/docs/d/backup_recovery_source_registrations.html.markdown
@@ -23,6 +23,9 @@ data "ibm_backup_recovery_source_registrations" "backup_recovery_source_registra
You can specify the following arguments for this data source.
* `encryption_key` - (Optional, String) Specifies the key to be used to encrypt the source credential. If includeSourceCredentials is set to true this key must be specified.
+* `endpoint_type` - (Optional, String) Backup Recovery Endpoint type. By default set to "public".
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with region, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with instance_id, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
* `ids` - (Optional, List) Ids specifies the list of source registration ids to return. If left empty, every source registration will be returned by default.
* `ignore_tenant_migration_in_progress_check` - (Optional, Boolean) If true, tenant migration check will be ignored.
* `include_external_metadata` - (Optional, Boolean) If true, the external entity metadata like maintenance mode config for the registered sources will be included.
diff --git a/website/docs/r/backup_recovery.html.markdown b/website/docs/r/backup_recovery.html.markdown
index bca4edf8ed..434945bd83 100644
--- a/website/docs/r/backup_recovery.html.markdown
+++ b/website/docs/r/backup_recovery.html.markdown
@@ -606,6 +606,9 @@ Nested schema for **mssql_params**:
* `id` - (Optional, Integer) If this is set, then the Cohesity host name or the IP address associated with this vlan is used for mounting Cohesity's view on the remote host.
* `interface_name` - (Computed, String) Interface group to use for Recovery.
* `name` - (Required, Forces new resource, String) Specifies the name of the Recovery.
+* `endpoint_type` - (Optional, String) Backup Recovery Endpoint type. By default set to "public".
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with region, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with instance_id, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
* `physical_params` - (Optional, Forces new resource, List) Specifies the recovery options specific to Physical environment.
Nested schema for **physical_params**:
* `download_file_and_folder_params` - (Optional, List) Specifies the parameters to download files and folders.
diff --git a/website/docs/r/backup_recovery_agent_upgrade_task.html.markdown b/website/docs/r/backup_recovery_agent_upgrade_task.html.markdown
index 7864db7a79..3ad5c7d0e1 100644
--- a/website/docs/r/backup_recovery_agent_upgrade_task.html.markdown
+++ b/website/docs/r/backup_recovery_agent_upgrade_task.html.markdown
@@ -31,6 +31,9 @@ You can specify the following arguments for this resource.
* `agent_ids` - (Optional, Forces new resource, List) Specifies the agents upgraded in the task.
* `description` - (Optional, Forces new resource, String) Specifies the description of the task.
* `name` - (Optional, Forces new resource, String) Specifies the name of the task.
+* `endpoint_type` - (Optional, String) Backup Recovery Endpoint type. By default set to "public".
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with region, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with instance_id, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
* `schedule_end_time_usecs` - (Optional, Forces new resource, Integer) Specifies the time before which the upgrade task should start execution as a Unix epoch Timestamp (in microseconds). If this is not specified the task will start anytime after scheduleTimeUsecs.
* `schedule_time_usecs` - (Optional, Forces new resource, Integer) Specifies the time when the task should start execution as a Unix epoch Timestamp (in microseconds). If no schedule is specified, the task will start immediately.
* `x_ibm_tenant_id` - (Required, Forces new resource, String) Specifies the key to be used to encrypt the source credential. If includeSourceCredentials is set to true this key must be specified.
diff --git a/website/docs/r/backup_recovery_connection_registration_token.html.markdown b/website/docs/r/backup_recovery_connection_registration_token.html.markdown
index 3dca464702..93d8a048f6 100644
--- a/website/docs/r/backup_recovery_connection_registration_token.html.markdown
+++ b/website/docs/r/backup_recovery_connection_registration_token.html.markdown
@@ -30,6 +30,9 @@ You can specify the following arguments for this resource.
* `connection_id` - (Required, Forces new resource, String) Specifies the ID of the connection, connectors belonging to which are to be fetched.
* `x_ibm_tenant_id` - (Required, Forces new resource, String) Specifies the key to be used to encrypt the source credential. If includeSourceCredentials is set to true this key must be specified.
+* `endpoint_type` - (Optional, String) Backup Recovery Endpoint type. By default set to "public".
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with region, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with instance_id, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
## Attribute Reference
diff --git a/website/docs/r/backup_recovery_data_source_connection.html.markdown b/website/docs/r/backup_recovery_data_source_connection.html.markdown
index 373736cf11..890774d323 100644
--- a/website/docs/r/backup_recovery_data_source_connection.html.markdown
+++ b/website/docs/r/backup_recovery_data_source_connection.html.markdown
@@ -24,6 +24,9 @@ You can specify the following arguments for this resource.
* `connection_name` - (Required, String) Specifies the name of the connection. For a given tenant, different connections can't have the same name. However, two (or more) different tenants can each have a connection with the same name.
* `x_ibm_tenant_id` - (Optional, String) Id of the tenant accessing the cluster.
+* `endpoint_type` - (Optional, String) Backup Recovery Endpoint type. By default set to "public".
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with region, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with instance_id, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
## Attribute Reference
diff --git a/website/docs/r/backup_recovery_data_source_connector_patch.html.markdown b/website/docs/r/backup_recovery_data_source_connector_patch.html.markdown
index 8cc8261a7d..4eb43b716d 100644
--- a/website/docs/r/backup_recovery_data_source_connector_patch.html.markdown
+++ b/website/docs/r/backup_recovery_data_source_connector_patch.html.markdown
@@ -24,6 +24,9 @@ resource "ibm_backup_recovery_data_source_connector_patch" "backup_recovery_data
You can specify the following arguments for this resource.
* `connector_id` - (Required, Forces new resource, String) Specifies the unique ID of the connector which is to be deleted.
+* `endpoint_type` - (Optional, String) Backup Recovery Endpoint type. By default set to "public".
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with region, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with instance_id, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
* `connector_name` - (Optional, Forces new resource, String) Specifies the name of the connector. The name of a connector need not be unique within a tenant or across tenants. The name of the connector can be updated as needed.
* `x_ibm_tenant_id` - (Required, Forces new resource, String) Specifies the key to be used to encrypt the source credential. If includeSourceCredentials is set to true this key must be specified.
diff --git a/website/docs/r/backup_recovery_download_files_folders.html.markdown b/website/docs/r/backup_recovery_download_files_folders.html.markdown
index 1971d0efc8..4ff3b8013a 100644
--- a/website/docs/r/backup_recovery_download_files_folders.html.markdown
+++ b/website/docs/r/backup_recovery_download_files_folders.html.markdown
@@ -150,6 +150,9 @@ Nested schema for **files_and_folders**:
* `glacier_retrieval_type` - (Optional, Forces new resource, String) Specifies the glacier retrieval type when restoring or downloding files or folders from a Glacier-based cloud snapshot.
* Constraints: Allowable values are: `kStandard`, `kExpeditedNoPCU`, `kExpeditedWithPCU`.
* `name` - (Required, Forces new resource, String) Specifies the name of the recovery task. This field must be set and must be a unique name.
+* `endpoint_type` - (Optional, String) Backup Recovery Endpoint type. By default set to "public".
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with region, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with instance_id, the provider constructs the endpoint URL using them, which overrides any value set through environment variables or the `endpoints.json` file.
* `object` - (Required, Forces new resource, List) Specifies the common snapshot parameters for a protected object.
Nested schema for **object**:
* `archival_target_info` - (Optional, List) Specifies the archival target information if the snapshot is an archival snapshot.
diff --git a/website/docs/r/backup_recovery_perform_action_on_protection_group_run_request.html.markdown b/website/docs/r/backup_recovery_perform_action_on_protection_group_run_request.html.markdown
index 675122a29b..67a8d501ec 100644
--- a/website/docs/r/backup_recovery_perform_action_on_protection_group_run_request.html.markdown
+++ b/website/docs/r/backup_recovery_perform_action_on_protection_group_run_request.html.markdown
@@ -68,6 +68,9 @@ Nested schema for **resume_params**:
* Constraints: The value must match regular expression `/^\\d+:\\d+$/`.
* `x_ibm_tenant_id` - (Required, Forces new resource, String) Specifies the key to be used to encrypt the source credential. If includeSourceCredentials is set to true this key must be specified.
* `group_id` - (Required, String) Specifies the protection group ID
+* `endpoint_type` - (Optional, String) Backup Recovery endpoint type. Defaults to `public`.
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with `region`, the provider constructs the endpoint URL from them, overriding any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with `instance_id`, the provider constructs the endpoint URL from them, overriding any value set through environment variables or the `endpoints.json` file.
## Attribute Reference
diff --git a/website/docs/r/backup_recovery_protection_group.html.markdown b/website/docs/r/backup_recovery_protection_group.html.markdown
index ffc3a01444..9ecfd47703 100644
--- a/website/docs/r/backup_recovery_protection_group.html.markdown
+++ b/website/docs/r/backup_recovery_protection_group.html.markdown
@@ -310,6 +310,9 @@ Nested schema for **alert_policy**:
* `environment` - (Required, String) Specifies the environment of the Protection Group.
* Constraints: Allowable values are: `kPhysical`, `kSQL`.
* `is_paused` - (Optional, Boolean) Specifies if the Protection Group is paused. New runs are not scheduled for paused Protection Groups. An active run, if any, is not impacted.
+* `endpoint_type` - (Optional, String) Backup Recovery endpoint type. Defaults to `public`.
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with `region`, the provider constructs the endpoint URL from them, overriding any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with `instance_id`, the provider constructs the endpoint URL from them, overriding any value set through environment variables or the `endpoints.json` file.
* `last_modified_timestamp_usecs` - (Optional, Integer) Specifies the last time this protection group was updated. If this is passed into a PUT request, then the backend will validate that the timestamp passed in matches the time that the protection group was actually last modified. If the two timestamps do not match, then the request will be rejected with a stale error.
* `mssql_params` - (Optional, List) Specifies the parameters specific to MSSQL Protection Group.
Nested schema for **mssql_params**:
diff --git a/website/docs/r/backup_recovery_protection_group_run_request.html.markdown b/website/docs/r/backup_recovery_protection_group_run_request.html.markdown
index 8b7de68229..b3b471e4eb 100644
--- a/website/docs/r/backup_recovery_protection_group_run_request.html.markdown
+++ b/website/docs/r/backup_recovery_protection_group_run_request.html.markdown
@@ -190,6 +190,9 @@ Nested schema for **targets_config**:
* Constraints: The default value is `false`.
* `x_ibm_tenant_id` - (Required, Forces new resource, String) Specifies the key to be used to encrypt the source credential. If includeSourceCredentials is set to true this key must be specified.
* `group_id` - (Required, String) Specifies the protection group ID
+* `endpoint_type` - (Optional, String) Backup Recovery endpoint type. Defaults to `public`.
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with `region`, the provider constructs the endpoint URL from them, overriding any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with `instance_id`, the provider constructs the endpoint URL from them, overriding any value set through environment variables or the `endpoints.json` file.
## Attribute Reference
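The same override works on the run-request resources. The following is a partial sketch with placeholder values only; any other arguments this resource requires (its run parameters are documented earlier on that page) are omitted here:

```terraform
resource "ibm_backup_recovery_protection_group_run_request" "example" {
  x_ibm_tenant_id = "tenant-id"           # placeholder tenant ID
  group_id        = "protection-group-id" # placeholder group ID
  # ...remaining run-request arguments omitted from this sketch

  # Route this request to a specific Backup Recovery instance; these values
  # override environment variables and the endpoints.json file.
  endpoint_type = "public"
  instance_id   = "backup-recovery-instance-guid" # placeholder instance ID
  region        = "eu-de"                         # placeholder region
}
```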
diff --git a/website/docs/r/backup_recovery_protection_policy.html.markdown b/website/docs/r/backup_recovery_protection_policy.html.markdown
index 2c211a7f6c..fbb15c36a8 100644
--- a/website/docs/r/backup_recovery_protection_policy.html.markdown
+++ b/website/docs/r/backup_recovery_protection_policy.html.markdown
@@ -1533,6 +1533,9 @@ Nested schema for **cascaded_targets_config**:
* `data_lock` - (Optional, String) This field is now deprecated. Please use the DataLockConfig in the backup retention.
* Constraints: Allowable values are: `Compliance`, `Administrative`.
* `description` - (Optional, String) Specifies the description of the Protection Policy.
+* `endpoint_type` - (Optional, String) Backup Recovery endpoint type. Defaults to `public`.
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with `region`, the provider constructs the endpoint URL from them, overriding any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with `instance_id`, the provider constructs the endpoint URL from them, overriding any value set through environment variables or the `endpoints.json` file.
* `extended_retention` - (Optional, List) Specifies additional retention policies that should be applied to the backup snapshots. A backup snapshot will be retained up to a time that is the maximum of all retention policies that are applicable to it.
Nested schema for **extended_retention**:
* `config_id` - (Optional, String) Specifies the unique identifier for the target getting added. This field needs to be passed only when policies are updated.
diff --git a/website/docs/r/backup_recovery_restore_points.html.markdown b/website/docs/r/backup_recovery_restore_points.html.markdown
index 87734e7c68..e282be8250 100644
--- a/website/docs/r/backup_recovery_restore_points.html.markdown
+++ b/website/docs/r/backup_recovery_restore_points.html.markdown
@@ -31,6 +31,9 @@ resource "ibm_backup_recovery_restore_points" "backup_recovery_restore_points_in
You can specify the following arguments for this resource.
* `end_time_usecs` - (Required, Forces new resource, Integer) Specifies the end time specified as a Unix epoch Timestamp in microseconds.
+* `endpoint_type` - (Optional, String) Backup Recovery endpoint type. Defaults to `public`.
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with `region`, the provider constructs the endpoint URL from them, overriding any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with `instance_id`, the provider constructs the endpoint URL from them, overriding any value set through environment variables or the `endpoints.json` file.
* `environment` - (Required, Forces new resource, String) Specifies the protection source environment type.
* Constraints: Allowable values are: `kVMware`, `kHyperV`, `kAzure`, `kGCP`, `kKVM`, `kAcropolis`, `kAWS`, `kPhysical`, `kGPFS`, `kElastifile`, `kNetapp`, `kGenericNas`, `kIsilon`, `kFlashBlade`, `kPure`, `kIbmFlashSystem`, `kSQL`, `kExchange`, `kAD`, `kOracle`, `kView`, `kRemoteAdapter`, `kO365`, `kKubernetes`, `kCassandra`, `kMongoDB`, `kCouchbase`, `kHdfs`, `kHive`, `kSAPHANA`, `kHBase`, `kUDA`, `kSfdc`.
* `protection_group_ids` - (Required, Forces new resource, List) Specifies the jobs for which to get the full snapshot information.
diff --git a/website/docs/r/backup_recovery_source_registration.html.markdown b/website/docs/r/backup_recovery_source_registration.html.markdown
index e41424dab9..7e4542a09d 100644
--- a/website/docs/r/backup_recovery_source_registration.html.markdown
+++ b/website/docs/r/backup_recovery_source_registration.html.markdown
@@ -56,6 +56,9 @@ Nested schema for **connections**:
* `environment` - (Required, String) Specifies the environment type of the Protection Source.
* Constraints: Allowable values are: `kPhysical`, `kSQL`.
* `name` - (Optional, String) The user specified name for this source.
+* `endpoint_type` - (Optional, String) Backup Recovery endpoint type. Defaults to `public`.
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with `region`, the provider constructs the endpoint URL from them, overriding any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with `instance_id`, the provider constructs the endpoint URL from them, overriding any value set through environment variables or the `endpoints.json` file.
* `physical_params` - (Optional, List) Specifies parameters to register physical server.
Nested schema for **physical_params**:
* `applications` - (Optional, List) Specifies the list of applications to be registered with Physical Source.
diff --git a/website/docs/r/backup_recovery_update_protection_group_run_request.html.markdown b/website/docs/r/backup_recovery_update_protection_group_run_request.html.markdown
index e0fb00b858..bb3fb638f4 100644
--- a/website/docs/r/backup_recovery_update_protection_group_run_request.html.markdown
+++ b/website/docs/r/backup_recovery_update_protection_group_run_request.html.markdown
@@ -167,6 +167,9 @@ Nested schema for **update_protection_group_run_params**:
* Constraints: The value must match regular expression `/^\\d+:\\d+$/`.
* `x_ibm_tenant_id` - (Required, Forces new resource, String) Specifies the key to be used to encrypt the source credential. If includeSourceCredentials is set to true this key must be specified.
* `group_id` - (Required, String) Specifies the protection group ID
+* `endpoint_type` - (Optional, String) Backup Recovery endpoint type. Defaults to `public`.
+* `instance_id` - (Optional, String) Backup Recovery instance ID. If provided here along with `region`, the provider constructs the endpoint URL from them, overriding any value set through environment variables or the `endpoints.json` file.
+* `region` - (Optional, String) Backup Recovery region. If provided here along with `instance_id`, the provider constructs the endpoint URL from them, overriding any value set through environment variables or the `endpoints.json` file.
## Attribute Reference