diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha
index f09edb728..11d6a2a17 100644
--- a/.codegen/_openapi_sha
+++ b/.codegen/_openapi_sha
@@ -1 +1 @@
-c3a3e3055fe11cb9683f398a665c225a03563ff1
\ No newline at end of file
+a91358d1ccad4b1a16a884930ac57c8087d8356f
\ No newline at end of file
diff --git a/.gitattributes b/.gitattributes
index 38a03117f..97a0fa447 100755
--- a/.gitattributes
+++ b/.gitattributes
@@ -65,6 +65,8 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppPermissions
databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResource.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceDatabase.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceDatabaseDatabasePermission.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceGenieSpace.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceGenieSpaceGenieSpacePermission.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceJob.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceJobJobPermission.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceSecret.java linguist-generated=true
@@ -76,6 +78,9 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceSql
databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceUcSecurable.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceUcSecurableUcSecurablePermission.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceUcSecurableUcSecurableType.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppUpdate.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppUpdateUpdateStatus.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppUpdateUpdateStatusUpdateState.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/ApplicationState.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/ApplicationStatus.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppsAPI.java linguist-generated=true
@@ -84,6 +89,8 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppsService.ja
databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppsSettingsAPI.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppsSettingsImpl.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppsSettingsService.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AsyncUpdateAppRequest.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/ComputeSize.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/ComputeState.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/ComputeStatus.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/CreateAppDeploymentRequest.java linguist-generated=true
@@ -97,6 +104,7 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/GetAppPermissi
databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/GetAppPermissionLevelsResponse.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/GetAppPermissionsRequest.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/GetAppRequest.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/GetAppUpdateRequest.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/GetCustomTemplateRequest.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/ListAppDeploymentsRequest.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/ListAppDeploymentsResponse.java linguist-generated=true
@@ -192,13 +200,23 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountStor
databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountStorageCredentialsService.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastore.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastoreAssignment.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastoreAssignmentResponse.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastoreResponse.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateStorageCredential.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateStorageCredentialInfo.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsDeleteMetastoreAssignmentResponse.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsDeleteMetastoreResponse.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsDeleteStorageCredentialResponse.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsGetMetastoreResponse.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsListMetastoresResponse.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsMetastoreAssignment.java linguist-generated=true
-databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsMetastoreInfo.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsStorageCredentialInfo.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateMetastore.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateMetastoreAssignment.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateMetastoreAssignmentResponse.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateMetastoreResponse.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateStorageCredential.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateStorageCredentialResponse.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ArtifactAllowlistInfo.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ArtifactAllowlistsAPI.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ArtifactAllowlistsImpl.java linguist-generated=true
@@ -241,6 +259,8 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/Connections
databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ContinuousUpdateStatus.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateAccessRequest.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateAccessRequestResponse.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateAccountsMetastore.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateAccountsStorageCredential.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateCatalog.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateConnection.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateCredentialRequest.java linguist-generated=true
@@ -581,6 +601,8 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/TemporaryTa
databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/TriggeredUpdateStatus.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UnassignRequest.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateAccessRequestDestinationsRequest.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateAccountsMetastore.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateAccountsStorageCredential.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateCatalog.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateCatalogWorkspaceBindingsResponse.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateConnection.java linguist-generated=true
@@ -979,6 +1001,7 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/MessageS
databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/MigrateDashboardRequest.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/PublishRequest.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/PublishedDashboard.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/QueryAttachmentParameter.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/Result.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/Schedule.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/SchedulePauseStatus.java linguist-generated=true
@@ -996,6 +1019,7 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/CreateData
databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/CreateDatabaseInstanceRoleRequest.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/CreateDatabaseTableRequest.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/CreateSyncedDatabaseTableRequest.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/CustomTag.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/DatabaseAPI.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/DatabaseCatalog.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/DatabaseCredential.java linguist-generated=true
@@ -1050,6 +1074,41 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/SyncedTabl
databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/UpdateDatabaseCatalogRequest.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/UpdateDatabaseInstanceRequest.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/UpdateSyncedDatabaseTableRequest.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/AggregationGranularity.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/AnomalyDetectionConfig.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CancelRefreshRequest.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CancelRefreshResponse.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CreateMonitorRequest.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CreateRefreshRequest.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CronSchedule.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CronSchedulePauseStatus.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataProfilingConfig.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataProfilingCustomMetric.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataProfilingCustomMetricType.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataProfilingStatus.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataQualityAPI.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataQualityImpl.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataQualityService.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DeleteMonitorRequest.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DeleteRefreshRequest.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/GetMonitorRequest.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/GetRefreshRequest.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/InferenceLogConfig.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/InferenceProblemType.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/ListMonitorRequest.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/ListMonitorResponse.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/ListRefreshRequest.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/ListRefreshResponse.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/Monitor.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/NotificationDestination.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/NotificationSettings.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/Refresh.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/RefreshState.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/RefreshTrigger.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/SnapshotConfig.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/TimeSeriesConfig.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/UpdateMonitorRequest.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/UpdateRefreshRequest.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/files/AddBlock.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/files/Close.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/files/Create.java linguist-generated=true
@@ -1990,6 +2049,7 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/UpdateSta
databricks-sdk-java/src/main/java/com/databricks/sdk/service/pipelines/UpdateStateInfoState.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/AwsCredentials.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/AwsKeyInfo.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/AzureKeyInfo.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/AzureWorkspaceInfo.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CloudResourceContainer.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateAwsKeyInfo.java linguist-generated=true
@@ -2007,7 +2067,9 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/Creden
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CredentialsAPI.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CredentialsImpl.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CredentialsService.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CustomerFacingComputeMode.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CustomerFacingGcpCloudResourceContainer.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CustomerFacingStorageMode.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CustomerManagedKey.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/DeleteCredentialRequest.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/DeleteEncryptionKeyRequest.java linguist-generated=true
@@ -2021,7 +2083,7 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/Encryp
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/EncryptionKeysService.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/EndpointUseCase.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/ErrorType.java linguist-generated=true
-databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/ExternalCustomerInfo.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpCommonNetworkConfig.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpKeyInfo.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpManagedNetworkConfig.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpNetworkInfo.java linguist-generated=true
@@ -2035,6 +2097,7 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetVpc
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetWorkspaceRequest.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GkeConfig.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GkeConfigConnectivityType.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/KeyAccessConfiguration.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/KeyUseCase.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/Network.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/NetworkHealth.java linguist-generated=true
@@ -2064,6 +2127,7 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/VpcEnd
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/VpcStatus.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WarningType.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/Workspace.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspaceNetwork.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspaceStatus.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspacesAPI.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspacesImpl.java linguist-generated=true
@@ -2188,6 +2252,8 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServingEndp
databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServingEndpointsService.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServingModelWorkloadType.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/TrafficConfig.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/UpdateInferenceEndpointNotifications.java linguist-generated=true
+databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/UpdateInferenceEndpointNotificationsResponse.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/UpdateProvisionedThroughputEndpointConfigRequest.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/V1ResponseChoiceElement.java linguist-generated=true
databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/AccountIpAccessEnable.java linguist-generated=true
diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md
index da376b995..d3d0aa6c3 100644
--- a/NEXT_CHANGELOG.md
+++ b/NEXT_CHANGELOG.md
@@ -13,3 +13,83 @@
### Internal Changes
### API Changes
+* Add `com.databricks.sdk.service.dataquality` package.
+* Add `workspaceClient.dataQuality()` service.
+* Add `createUpdate()` and `getUpdate()` methods for `workspaceClient.apps()` service.
+* Add `updateNotifications()` method for `workspaceClient.servingEndpoints()` service.
+* Add `computeSize` field for `com.databricks.sdk.service.apps.App`.
+* Add `genieSpace` field for `com.databricks.sdk.service.apps.AppResource`.
+* Add `skipValidation` field for `com.databricks.sdk.service.catalog.AccountsCreateStorageCredential`.
+* Add `skipValidation` field for `com.databricks.sdk.service.catalog.AccountsUpdateStorageCredential`.
+* Add `aliases`, `browseOnly`, `createdAt`, `createdBy`, `fullName`, `metastoreId`, `owner`, `updatedAt` and `updatedBy` fields for `com.databricks.sdk.service.catalog.CreateRegisteredModelRequest`.
+* Add `catalogName`, `id`, `modelName` and `schemaName` fields for `com.databricks.sdk.service.catalog.RegisteredModelAlias`.
+* Add `aliases`, `catalogName`, `createdAt`, `createdBy`, `id`, `metastoreId`, `modelName`, `modelVersionDependencies`, `runId`, `runWorkspaceId`, `schemaName`, `source`, `status`, `storageLocation`, `updatedAt` and `updatedBy` fields for `com.databricks.sdk.service.catalog.UpdateModelVersionRequest`.
+* Add `aliases`, `browseOnly`, `catalogName`, `createdAt`, `createdBy`, `metastoreId`, `name`, `schemaName`, `storageLocation`, `updatedAt` and `updatedBy` fields for `com.databricks.sdk.service.catalog.UpdateRegisteredModelRequest`.
+* Add `parameters` field for `com.databricks.sdk.service.dashboards.GenieQueryAttachment`.
+* Add `databaseInstanceName` field for `com.databricks.sdk.service.database.CreateDatabaseInstanceRoleRequest`.
+* Add `customTags`, `effectiveCustomTags`, `effectiveUsagePolicyId` and `usagePolicyId` fields for `com.databricks.sdk.service.database.DatabaseInstance`.
+* Add `effectiveAttributes` and `instanceName` fields for `com.databricks.sdk.service.database.DatabaseInstanceRole`.
+* Add `keyRegion` field for `com.databricks.sdk.service.provisioning.CreateAwsKeyInfo`.
+* Add `roleArn` field for `com.databricks.sdk.service.provisioning.CreateStorageConfigurationRequest`.
+* Add `azureKeyInfo` field for `com.databricks.sdk.service.provisioning.CustomerManagedKey`.
+* [Breaking] Add `customerFacingPrivateAccessSettings` field for `com.databricks.sdk.service.provisioning.ReplacePrivateAccessSettingsRequest`.
+* Add `roleArn` field for `com.databricks.sdk.service.provisioning.StorageConfiguration`.
+* [Breaking] Add `customerFacingWorkspace` field for `com.databricks.sdk.service.provisioning.UpdateWorkspaceRequest`.
+* Add `updateMask` field for `com.databricks.sdk.service.provisioning.UpdateWorkspaceRequest`.
+* Add `computeMode`, `network`, `networkConnectivityConfigId` and `storageMode` fields for `com.databricks.sdk.service.provisioning.Workspace`.
+* Add `enableServerlessCompute` field for `com.databricks.sdk.service.sql.GetWorkspaceWarehouseConfigResponse`.
+* Add `pageSize` and `pageToken` fields for `com.databricks.sdk.service.sql.ListWarehousesRequest`.
+* Add `nextPageToken` field for `com.databricks.sdk.service.sql.ListWarehousesResponse`.
+* Add `enableServerlessCompute` field for `com.databricks.sdk.service.sql.SetWorkspaceWarehouseConfigRequest`.
+* Add `MODEL_VERSION_STATUS_UNKNOWN` enum value for `com.databricks.sdk.service.catalog.ModelVersionInfoStatus`.
+* Add `EXTERNAL_USE_SCHEMA` enum value for `com.databricks.sdk.service.catalog.Privilege`.
+* Add `STREAM_NATIVE` enum value for `com.databricks.sdk.service.catalog.SystemType`.
+* Add `K8S_ACTIVE_POD_QUOTA_EXCEEDED` and `CLOUD_ACCOUNT_POD_QUOTA_EXCEEDED` enum values for `com.databricks.sdk.service.compute.TerminationReasonCode`.
+* Add `EXCEEDED_MAX_TOKEN_LENGTH_EXCEPTION`, `INTERNAL_CATALOG_ASSET_CREATION_ONGOING_EXCEPTION`, `INTERNAL_CATALOG_ASSET_CREATION_FAILED_EXCEPTION` and `INTERNAL_CATALOG_ASSET_CREATION_UNSUPPORTED_EXCEPTION` enum values for `com.databricks.sdk.service.dashboards.MessageErrorType`.
+* Add `SSH_BOOTSTRAP_FAILURE`, `AWS_INACCESSIBLE_KMS_KEY_FAILURE`, `INIT_CONTAINER_NOT_FINISHED`, `SPARK_IMAGE_DOWNLOAD_THROTTLED`, `SPARK_IMAGE_NOT_FOUND`, `CLUSTER_OPERATION_THROTTLED`, `CLUSTER_OPERATION_TIMEOUT`, `SERVERLESS_LONG_RUNNING_TERMINATED`, `AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE`, `INVALID_WORKER_IMAGE_FAILURE`, `WORKSPACE_UPDATE`, `INVALID_AWS_PARAMETER`, `DRIVER_OUT_OF_DISK`, `DRIVER_OUT_OF_MEMORY`, `DRIVER_LAUNCH_TIMEOUT`, `DRIVER_UNEXPECTED_FAILURE`, `UNEXPECTED_POD_RECREATION`, `GCP_INACCESSIBLE_KMS_KEY_FAILURE`, `GCP_KMS_KEY_PERMISSION_DENIED`, `DRIVER_EVICTION`, `USER_INITIATED_VM_TERMINATION`, `GCP_IAM_TIMEOUT`, `AWS_RESOURCE_QUOTA_EXCEEDED`, `CLOUD_ACCOUNT_SETUP_FAILURE`, `AWS_INVALID_KEY_PAIR`, `DRIVER_POD_CREATION_FAILURE`, `MAINTENANCE_MODE`, `INTERNAL_CAPACITY_FAILURE`, `EXECUTOR_POD_UNSCHEDULED`, `STORAGE_DOWNLOAD_FAILURE_SLOW`, `STORAGE_DOWNLOAD_FAILURE_THROTTLED`, `DYNAMIC_SPARK_CONF_SIZE_EXCEEDED`, `AWS_INSTANCE_PROFILE_UPDATE_FAILURE`, `INSTANCE_POOL_NOT_FOUND`, `INSTANCE_POOL_MAX_CAPACITY_REACHED`, `AWS_INVALID_KMS_KEY_STATE`, `GCP_INSUFFICIENT_CAPACITY`, `GCP_API_RATE_QUOTA_EXCEEDED`, `GCP_RESOURCE_QUOTA_EXCEEDED`, `GCP_IP_SPACE_EXHAUSTED`, `GCP_SERVICE_ACCOUNT_ACCESS_DENIED`, `GCP_SERVICE_ACCOUNT_NOT_FOUND`, `GCP_FORBIDDEN`, `GCP_NOT_FOUND`, `RESOURCE_USAGE_BLOCKED`, `DATA_ACCESS_CONFIG_CHANGED`, `ACCESS_TOKEN_FAILURE`, `INVALID_INSTANCE_PLACEMENT_PROTOCOL`, `BUDGET_POLICY_RESOLUTION_FAILURE`, `IN_PENALTY_BOX`, `DISASTER_RECOVERY_REPLICATION`, `BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG`, `INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG`, `STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG`, `CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG`, `GCP_SUBNET_NOT_READY`, `CLOUD_OPERATION_CANCELLED`, `CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED`, `GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED`, `BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED`, `EOS_SPARK_IMAGE`, `NO_MATCHED_K8S`, `LAZY_ALLOCATION_TIMEOUT`, `DRIVER_NODE_UNREACHABLE`, `SECRET_CREATION_FAILURE`, `POD_SCHEDULING_FAILURE`, `POD_ASSIGNMENT_FAILURE`, `ALLOCATION_TIMEOUT`, `ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_READY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS`, `ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY`, `ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS`, `NETVISOR_SETUP_TIMEOUT`, `NO_MATCHED_K8S_TESTING_TAG`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG`, `GKE_BASED_CLUSTER_TERMINATION`, `ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS`, `DOCKER_INVALID_OS_EXCEPTION`, `DOCKER_CONTAINER_CREATION_EXCEPTION`, `DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION`, `DNS_RESOLUTION_ERROR`, `GCP_DENIED_BY_ORG_POLICY`, `SECRET_PERMISSION_DENIED`, `NETWORK_CHECK_NIC_FAILURE`, `NETWORK_CHECK_DNS_SERVER_FAILURE`, `NETWORK_CHECK_STORAGE_FAILURE`, `NETWORK_CHECK_METADATA_ENDPOINT_FAILURE`, `NETWORK_CHECK_CONTROL_PLANE_FAILURE`, `NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE`, `DRIVER_UNHEALTHY`, `SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION`, `DRIVER_DNS_RESOLUTION_FAILURE`, `NO_ACTIVATED_K8S`, `USAGE_POLICY_ENTITLEMENT_DENIED`, `NO_ACTIVATED_K8S_TESTING_TAG`, `K8S_ACTIVE_POD_QUOTA_EXCEEDED` and `CLOUD_ACCOUNT_POD_QUOTA_EXCEEDED` enum values for `com.databricks.sdk.service.sql.TerminationReasonCode`.
+* [Breaking] Change `create()` method for `accountClient.accountMetastoreAssignments()` service to start returning `com.databricks.sdk.service.catalog.AccountsCreateMetastoreAssignmentResponse` class.
+* [Breaking] Change `delete()` method for `accountClient.accountMetastoreAssignments()` service to start returning `com.databricks.sdk.service.catalog.AccountsDeleteMetastoreAssignmentResponse` class.
+* [Breaking] Change `update()` method for `accountClient.accountMetastoreAssignments()` service to start returning `com.databricks.sdk.service.catalog.AccountsUpdateMetastoreAssignmentResponse` class.
+* [Breaking] Change `create()` method for `accountClient.accountMetastores()` service to return `com.databricks.sdk.service.catalog.AccountsCreateMetastoreResponse` class.
+* [Breaking] Change `delete()` method for `accountClient.accountMetastores()` service to start returning `com.databricks.sdk.service.catalog.AccountsDeleteMetastoreResponse` class.
+* [Breaking] Change `get()` method for `accountClient.accountMetastores()` service to return `com.databricks.sdk.service.catalog.AccountsGetMetastoreResponse` class.
+* [Breaking] Change `list()` method for `accountClient.accountMetastores()` service to return `com.databricks.sdk.service.catalog.AccountsListMetastoresResponse` class.
+* [Breaking] Change `update()` method for `accountClient.accountMetastores()` service to return `com.databricks.sdk.service.catalog.AccountsUpdateMetastoreResponse` class.
+* [Breaking] Change `create()` method for `accountClient.accountStorageCredentials()` service to return `com.databricks.sdk.service.catalog.AccountsCreateStorageCredentialInfo` class.
+* [Breaking] Change `delete()` method for `accountClient.accountStorageCredentials()` service to start returning `com.databricks.sdk.service.catalog.AccountsDeleteStorageCredentialResponse` class.
+* [Breaking] Change `update()` method for `accountClient.accountStorageCredentials()` service to return `com.databricks.sdk.service.catalog.AccountsUpdateStorageCredentialResponse` class.
+* [Breaking] Change `create()` method for `workspaceClient.registeredModels()` service with new required argument order.
+* [Breaking] Change `delete()` method for `accountClient.credentials()` service to start returning `com.databricks.sdk.service.provisioning.Credential` class.
+* [Breaking] Change `delete()` method for `accountClient.encryptionKeys()` service to start returning `com.databricks.sdk.service.provisioning.CustomerManagedKey` class.
+* [Breaking] Change `create()` method for `accountClient.networks()` service with new required argument order.
+* [Breaking] Change `delete()` method for `accountClient.networks()` service to start returning `com.databricks.sdk.service.provisioning.Network` class.
+* [Breaking] Change `create()` and `replace()` methods for `accountClient.privateAccess()` service with new required argument order.
+* [Breaking] Change `delete()` and `replace()` methods for `accountClient.privateAccess()` service to start returning `com.databricks.sdk.service.provisioning.PrivateAccessSettings` class.
+* [Breaking] Change `delete()` method for `accountClient.storage()` service to start returning `com.databricks.sdk.service.provisioning.StorageConfiguration` class.
+* [Breaking] Change `create()` method for `accountClient.vpcEndpoints()` service with new required argument order.
+* [Breaking] Change `delete()` method for `accountClient.vpcEndpoints()` service to start returning `com.databricks.sdk.service.provisioning.VpcEndpoint` class.
+* [Breaking] Change `create()` and `update()` methods for `accountClient.workspaces()` service with new required argument order.
+* [Breaking] Change `delete()` and `update()` methods for `accountClient.workspaces()` service to start returning `com.databricks.sdk.service.provisioning.Workspace` class.
+* [Breaking] Change `executeStatement()` method for `workspaceClient.statementExecution()` service. The method path has changed.
+* [Breaking] Change `metastoreInfo` field for `com.databricks.sdk.service.catalog.AccountsCreateMetastore` to type `com.databricks.sdk.service.catalog.CreateAccountsMetastore` class.
+* [Breaking] Change `credentialInfo` field for `com.databricks.sdk.service.catalog.AccountsCreateStorageCredential` to type `com.databricks.sdk.service.catalog.CreateAccountsStorageCredential` class.
+* [Breaking] Change `metastoreInfo` field for `com.databricks.sdk.service.catalog.AccountsUpdateMetastore` to type `com.databricks.sdk.service.catalog.UpdateAccountsMetastore` class.
+* [Breaking] Change `credentialInfo` field for `com.databricks.sdk.service.catalog.AccountsUpdateStorageCredential` to type `com.databricks.sdk.service.catalog.UpdateAccountsStorageCredential` class.
+* Change `catalogName`, `name` and `schemaName` fields for `com.databricks.sdk.service.catalog.CreateRegisteredModelRequest` to no longer be required.
+* Change `name` field for `com.databricks.sdk.service.database.DatabaseInstanceRole` to be required.
+* [Breaking] Change `name` field for `com.databricks.sdk.service.database.DatabaseInstanceRole` to be required.
+* Change `networkName` field for `com.databricks.sdk.service.provisioning.CreateNetworkRequest` to no longer be required.
+* Change `privateAccessSettingsName` and `region` fields for `com.databricks.sdk.service.provisioning.CreatePrivateAccessSettingsRequest` to no longer be required.
+* Change `vpcEndpointName` field for `com.databricks.sdk.service.provisioning.CreateVpcEndpointRequest` to no longer be required.
+* Change `workspaceName` field for `com.databricks.sdk.service.provisioning.CreateWorkspaceRequest` to no longer be required.
+* Change `dataplaneRelay` and `restApi` fields for `com.databricks.sdk.service.provisioning.NetworkVpcEndpoints` to no longer be required.
+* [Breaking] Change `dataplaneRelay` and `restApi` fields for `com.databricks.sdk.service.provisioning.NetworkVpcEndpoints` to no longer be required.
+* [Breaking] Change waiter for `accountClient.workspaces().update()` method.
+* [Breaking] Remove `browseOnly` field for `com.databricks.sdk.service.catalog.ModelVersionInfo`.
+* [Breaking] Remove `jarDependencies` field for `com.databricks.sdk.service.compute.Environment`.
+* [Breaking] Remove `isNoPublicIpEnabled` field for `com.databricks.sdk.service.provisioning.CreateWorkspaceRequest`.
+* [Breaking] Remove `allowedVpcEndpointIds`, `privateAccessLevel`, `privateAccessSettingsName`, `publicAccessEnabled` and `region` fields for `com.databricks.sdk.service.provisioning.ReplacePrivateAccessSettingsRequest`.
+* [Breaking] Remove `externalId` field for `com.databricks.sdk.service.provisioning.StsRole`.
+* [Breaking] Remove `awsRegion`, `credentialsId`, `customTags`, `managedServicesCustomerManagedKeyId`, `networkConnectivityConfigId`, `networkId`, `privateAccessSettingsId`, `storageConfigurationId` and `storageCustomerManagedKeyId` fields for `com.databricks.sdk.service.provisioning.UpdateWorkspaceRequest`.
+* [Breaking] Remove `externalCustomerInfo` and `isNoPublicIpEnabled` fields for `com.databricks.sdk.service.provisioning.Workspace`.
+* [Breaking] Remove `STATUS_UNSPECIFIED` enum value for `com.databricks.sdk.service.sql.Status`.
\ No newline at end of file
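For reviewers, a minimal sketch of the new asynchronous app-update flow announced above. The app name "my-app" is a placeholder; AsyncUpdateAppRequest.setAppName() is inferred from the getAppName() call in this diff, and Wait.get() is the SDK's existing waiter entry point:

import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.apps.AppUpdate;
import com.databricks.sdk.service.apps.AsyncUpdateAppRequest;
import com.databricks.sdk.support.Wait;

class CreateUpdateExample {
  public static void main(String[] args) throws Exception {
    WorkspaceClient w = new WorkspaceClient();
    // createUpdate() returns immediately; the Wait handle polls getUpdate()
    // until the update reaches SUCCEEDED (or fails / times out).
    Wait<AppUpdate, AppUpdate> started =
        w.apps().createUpdate(new AsyncUpdateAppRequest().setAppName("my-app"));
    AppUpdate finished = started.get(); // blocks; 20-minute default timeout
    System.out.println(finished.getStatus().getState());
  }
}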
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/WorkspaceClient.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/WorkspaceClient.java
index bac85f5a9..e0c6ccce2 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/WorkspaceClient.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/WorkspaceClient.java
@@ -103,6 +103,8 @@
import com.databricks.sdk.service.dashboards.LakeviewService;
import com.databricks.sdk.service.database.DatabaseAPI;
import com.databricks.sdk.service.database.DatabaseService;
+import com.databricks.sdk.service.dataquality.DataQualityAPI;
+import com.databricks.sdk.service.dataquality.DataQualityService;
import com.databricks.sdk.service.files.DbfsService;
import com.databricks.sdk.service.files.FilesAPI;
import com.databricks.sdk.service.files.FilesService;
@@ -286,6 +288,7 @@ public class WorkspaceClient {
private CurrentUserAPI currentUserAPI;
private DashboardWidgetsAPI dashboardWidgetsAPI;
private DashboardsAPI dashboardsAPI;
+ private DataQualityAPI dataQualityAPI;
private DataSourcesAPI dataSourcesAPI;
private DatabaseAPI databaseAPI;
private DbfsExt dbfsAPI;
@@ -416,6 +419,7 @@ public WorkspaceClient(DatabricksConfig config) {
currentUserAPI = new CurrentUserAPI(apiClient);
dashboardWidgetsAPI = new DashboardWidgetsAPI(apiClient);
dashboardsAPI = new DashboardsAPI(apiClient);
+ dataQualityAPI = new DataQualityAPI(apiClient);
dataSourcesAPI = new DataSourcesAPI(apiClient);
databaseAPI = new DatabaseAPI(apiClient);
dbfsAPI = new DbfsExt(apiClient);
@@ -801,6 +805,11 @@ public DashboardsAPI dashboards() {
return dashboardsAPI;
}
+ /** Manage the data quality of Unity Catalog objects (currently supports `schema` and `table`) */
+ public DataQualityAPI dataQuality() {
+ return dataQualityAPI;
+ }
+
/**
* This API is provided to assist you in making new query objects. When creating a query object,
* you may optionally specify a `data_source_id` for the SQL warehouse against which it will run.
@@ -1555,8 +1564,8 @@ public RedashConfigAPI redashConfig() {
* version metadata (comments, aliases) create a new model version, or update permissions on the
* registered model, users must be owners of the registered model.
*
- * <p>Note: The securable type for models is "FUNCTION". When using REST APIs (e.g. tagging,
- * grants) that specify a securable type, use "FUNCTION" as the securable type.
+ * <p>Note: The securable type for models is FUNCTION. When using REST APIs (e.g. tagging, grants)
+ * that specify a securable type, use FUNCTION as the securable type.
*/
public RegisteredModelsAPI registeredModels() {
return registeredModelsAPI;
@@ -1727,16 +1736,16 @@ public SharesAPI shares() {
* has not yet finished. This can be set to either `CONTINUE`, to fallback to asynchronous mode,
* or it can be set to `CANCEL`, which cancels the statement.
*
- * <p>In summary: - Synchronous mode - `wait_timeout=30s` and `on_wait_timeout=CANCEL` - The call
- * waits up to 30 seconds; if the statement execution finishes within this time, the result data
- * is returned directly in the response. If the execution takes longer than 30 seconds, the
- * execution is canceled and the call returns with a `CANCELED` state. - Asynchronous mode -
- * `wait_timeout=0s` (`on_wait_timeout` is ignored) - The call doesn't wait for the statement to
- * finish but returns directly with a statement ID. The status of the statement execution can be
- * polled by issuing :method:statementexecution/getStatement with the statement ID. Once the
+ * <p>In summary: - **Synchronous mode** (`wait_timeout=30s` and `on_wait_timeout=CANCEL`): The
+ * call waits up to 30 seconds; if the statement execution finishes within this time, the result
+ * data is returned directly in the response. If the execution takes longer than 30 seconds, the
+ * execution is canceled and the call returns with a `CANCELED` state. - **Asynchronous mode**
+ * (`wait_timeout=0s` and `on_wait_timeout` is ignored): The call doesn't wait for the statement
+ * to finish but returns directly with a statement ID. The status of the statement execution can
+ * be polled by issuing :method:statementexecution/getStatement with the statement ID. Once the
* execution has succeeded, this call also returns the result and metadata in the response. -
- * Hybrid mode (default) - `wait_timeout=10s` and `on_wait_timeout=CONTINUE` - The call waits for
- * up to 10 seconds; if the statement execution finishes within this time, the result data is
+ * **[Default] Hybrid mode** (`wait_timeout=10s` and `on_wait_timeout=CONTINUE`): The call waits
+ * for up to 10 seconds; if the statement execution finishes within this time, the result data is
* returned directly in the response. If the execution takes longer than 10 seconds, a statement
* ID is returned. The statement ID can be used to fetch status and results in the same way as in
* the asynchronous mode.
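A sketch of the synchronous mode described above, using the sql package's ExecuteStatementRequest. The warehouse id is a placeholder, and the setter names are assumed from the SDK's generated builder pattern:

import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.sql.ExecuteStatementRequest;
import com.databricks.sdk.service.sql.ExecuteStatementRequestOnWaitTimeout;

class ExecuteStatementModes {
  public static void main(String[] args) {
    WorkspaceClient w = new WorkspaceClient();
    // Synchronous mode: wait up to 30s, cancel the statement on timeout.
    // Asynchronous mode would instead pass wait_timeout=0s and poll by statement ID.
    var response =
        w.statementExecution()
            .executeStatement(
                new ExecuteStatementRequest()
                    .setWarehouseId("<warehouse-id>")
                    .setStatement("SELECT 1")
                    .setWaitTimeout("30s")
                    .setOnWaitTimeout(ExecuteStatementRequestOnWaitTimeout.CANCEL));
    System.out.println(response);
  }
}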
@@ -2409,6 +2418,17 @@ public WorkspaceClient withDashboardsAPI(DashboardsAPI dashboards) {
return this;
}
+ /** Replace the default DataQualityService with a custom implementation. */
+ public WorkspaceClient withDataQualityImpl(DataQualityService dataQuality) {
+ return this.withDataQualityAPI(new DataQualityAPI(dataQuality));
+ }
+
+ /** Replace the default DataQualityAPI with a custom implementation. */
+ public WorkspaceClient withDataQualityAPI(DataQualityAPI dataQuality) {
+ this.dataQualityAPI = dataQuality;
+ return this;
+ }
+
/** Replace the default DataSourcesService with a custom implementation. */
public WorkspaceClient withDataSourcesImpl(DataSourcesService dataSources) {
return this.withDataSourcesAPI(new DataSourcesAPI(dataSources));
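The new withDataQualityImpl()/withDataQualityAPI() hooks follow the client's existing override pattern; a sketch of swapping in a stub for unit tests, assuming Mockito on the test classpath and ambient auth configuration for the client constructor:

import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.core.DatabricksConfig;
import com.databricks.sdk.service.dataquality.DataQualityService;
import org.mockito.Mockito;

class DataQualityWiringTest {
  WorkspaceClient newClientWithStub() {
    // Replace the generated implementation with a stub; no HTTP calls are made
    // by the data-quality service in tests.
    DataQualityService stub = Mockito.mock(DataQualityService.class);
    return new WorkspaceClient(new DatabricksConfig()).withDataQualityImpl(stub);
  }
}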
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/App.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/App.java
index 97b6f3b19..97dda72ac 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/App.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/App.java
@@ -25,6 +25,10 @@ public class App {
@JsonProperty("budget_policy_id")
private String budgetPolicyId;
+ /** */
+ @JsonProperty("compute_size")
+ private ComputeSize computeSize;
+
/** */
@JsonProperty("compute_status")
private ComputeStatus computeStatus;
@@ -141,6 +145,15 @@ public String getBudgetPolicyId() {
return budgetPolicyId;
}
+ public App setComputeSize(ComputeSize computeSize) {
+ this.computeSize = computeSize;
+ return this;
+ }
+
+ public ComputeSize getComputeSize() {
+ return computeSize;
+ }
+
public App setComputeStatus(ComputeStatus computeStatus) {
this.computeStatus = computeStatus;
return this;
@@ -329,6 +342,7 @@ public boolean equals(Object o) {
return Objects.equals(activeDeployment, that.activeDeployment)
&& Objects.equals(appStatus, that.appStatus)
&& Objects.equals(budgetPolicyId, that.budgetPolicyId)
+ && Objects.equals(computeSize, that.computeSize)
&& Objects.equals(computeStatus, that.computeStatus)
&& Objects.equals(createTime, that.createTime)
&& Objects.equals(creator, that.creator)
@@ -357,6 +371,7 @@ public int hashCode() {
activeDeployment,
appStatus,
budgetPolicyId,
+ computeSize,
computeStatus,
createTime,
creator,
@@ -385,6 +400,7 @@ public String toString() {
.add("activeDeployment", activeDeployment)
.add("appStatus", appStatus)
.add("budgetPolicyId", budgetPolicyId)
+ .add("computeSize", computeSize)
.add("computeStatus", computeStatus)
.add("createTime", createTime)
.add("creator", creator)
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResource.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResource.java
index 1e8acf263..2761c1651 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResource.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResource.java
@@ -17,6 +17,10 @@ public class AppResource {
@JsonProperty("description")
private String description;
+ /** */
+ @JsonProperty("genie_space")
+ private AppResourceGenieSpace genieSpace;
+
/** */
@JsonProperty("job")
private AppResourceJob job;
@@ -59,6 +63,15 @@ public String getDescription() {
return description;
}
+ public AppResource setGenieSpace(AppResourceGenieSpace genieSpace) {
+ this.genieSpace = genieSpace;
+ return this;
+ }
+
+ public AppResourceGenieSpace getGenieSpace() {
+ return genieSpace;
+ }
+
public AppResource setJob(AppResourceJob job) {
this.job = job;
return this;
@@ -120,6 +133,7 @@ public boolean equals(Object o) {
AppResource that = (AppResource) o;
return Objects.equals(database, that.database)
&& Objects.equals(description, that.description)
+ && Objects.equals(genieSpace, that.genieSpace)
&& Objects.equals(job, that.job)
&& Objects.equals(name, that.name)
&& Objects.equals(secret, that.secret)
@@ -131,7 +145,15 @@ public boolean equals(Object o) {
@Override
public int hashCode() {
return Objects.hash(
- database, description, job, name, secret, servingEndpoint, sqlWarehouse, ucSecurable);
+ database,
+ description,
+ genieSpace,
+ job,
+ name,
+ secret,
+ servingEndpoint,
+ sqlWarehouse,
+ ucSecurable);
}
@Override
@@ -139,6 +161,7 @@ public String toString() {
return new ToStringer(AppResource.class)
.add("database", database)
.add("description", description)
+ .add("genieSpace", genieSpace)
.add("job", job)
.add("name", name)
.add("secret", secret)
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceGenieSpace.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceGenieSpace.java
new file mode 100755
index 000000000..00045cdbd
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceGenieSpace.java
@@ -0,0 +1,74 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.apps;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class AppResourceGenieSpace {
+ /** */
+ @JsonProperty("name")
+ private String name;
+
+ /** */
+ @JsonProperty("permission")
+ private AppResourceGenieSpaceGenieSpacePermission permission;
+
+ /** */
+ @JsonProperty("space_id")
+ private String spaceId;
+
+ public AppResourceGenieSpace setName(String name) {
+ this.name = name;
+ return this;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public AppResourceGenieSpace setPermission(AppResourceGenieSpaceGenieSpacePermission permission) {
+ this.permission = permission;
+ return this;
+ }
+
+ public AppResourceGenieSpaceGenieSpacePermission getPermission() {
+ return permission;
+ }
+
+ public AppResourceGenieSpace setSpaceId(String spaceId) {
+ this.spaceId = spaceId;
+ return this;
+ }
+
+ public String getSpaceId() {
+ return spaceId;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ AppResourceGenieSpace that = (AppResourceGenieSpace) o;
+ return Objects.equals(name, that.name)
+ && Objects.equals(permission, that.permission)
+ && Objects.equals(spaceId, that.spaceId);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(name, permission, spaceId);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(AppResourceGenieSpace.class)
+ .add("name", name)
+ .add("permission", permission)
+ .add("spaceId", spaceId)
+ .toString();
+ }
+}
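A sketch of wiring the new Genie space resource into an app, using only the setters and enum values added in this diff (the space id is a placeholder):

import com.databricks.sdk.service.apps.AppResource;
import com.databricks.sdk.service.apps.AppResourceGenieSpace;
import com.databricks.sdk.service.apps.AppResourceGenieSpaceGenieSpacePermission;

class GenieSpaceResourceExample {
  static AppResource genieResource() {
    // Attach an existing Genie space to an app with CAN_RUN permission.
    return new AppResource()
        .setName("genie")
        .setDescription("Genie space backing the app")
        .setGenieSpace(
            new AppResourceGenieSpace()
                .setSpaceId("<space-id>") // placeholder id
                .setPermission(AppResourceGenieSpaceGenieSpacePermission.CAN_RUN));
  }
}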
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceGenieSpaceGenieSpacePermission.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceGenieSpaceGenieSpacePermission.java
new file mode 100755
index 000000000..c93785cc3
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppResourceGenieSpaceGenieSpacePermission.java
@@ -0,0 +1,13 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.apps;
+
+import com.databricks.sdk.support.Generated;
+
+@Generated
+public enum AppResourceGenieSpaceGenieSpacePermission {
+ CAN_EDIT,
+ CAN_MANAGE,
+ CAN_RUN,
+ CAN_VIEW,
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppUpdate.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppUpdate.java
new file mode 100755
index 000000000..b34c390e7
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppUpdate.java
@@ -0,0 +1,136 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.apps;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Collection;
+import java.util.Objects;
+
+@Generated
+public class AppUpdate {
+ /** */
+ @JsonProperty("budget_policy_id")
+ private String budgetPolicyId;
+
+ /** */
+ @JsonProperty("compute_size")
+ private ComputeSize computeSize;
+
+ /** */
+ @JsonProperty("description")
+ private String description;
+
+ /** */
+ @JsonProperty("resources")
+ private Collection<AppResource> resources;
+
+ /** */
+ @JsonProperty("status")
+ private AppUpdateUpdateStatus status;
+
+ /** */
+ @JsonProperty("usage_policy_id")
+ private String usagePolicyId;
+
+ /** */
+ @JsonProperty("user_api_scopes")
+ private Collection<String> userApiScopes;
+
+ public AppUpdate setBudgetPolicyId(String budgetPolicyId) {
+ this.budgetPolicyId = budgetPolicyId;
+ return this;
+ }
+
+ public String getBudgetPolicyId() {
+ return budgetPolicyId;
+ }
+
+ public AppUpdate setComputeSize(ComputeSize computeSize) {
+ this.computeSize = computeSize;
+ return this;
+ }
+
+ public ComputeSize getComputeSize() {
+ return computeSize;
+ }
+
+ public AppUpdate setDescription(String description) {
+ this.description = description;
+ return this;
+ }
+
+ public String getDescription() {
+ return description;
+ }
+
+ public AppUpdate setResources(Collection<AppResource> resources) {
+ this.resources = resources;
+ return this;
+ }
+
+ public Collection<AppResource> getResources() {
+ return resources;
+ }
+
+ public AppUpdate setStatus(AppUpdateUpdateStatus status) {
+ this.status = status;
+ return this;
+ }
+
+ public AppUpdateUpdateStatus getStatus() {
+ return status;
+ }
+
+ public AppUpdate setUsagePolicyId(String usagePolicyId) {
+ this.usagePolicyId = usagePolicyId;
+ return this;
+ }
+
+ public String getUsagePolicyId() {
+ return usagePolicyId;
+ }
+
+ public AppUpdate setUserApiScopes(Collection<String> userApiScopes) {
+ this.userApiScopes = userApiScopes;
+ return this;
+ }
+
+ public Collection<String> getUserApiScopes() {
+ return userApiScopes;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ AppUpdate that = (AppUpdate) o;
+ return Objects.equals(budgetPolicyId, that.budgetPolicyId)
+ && Objects.equals(computeSize, that.computeSize)
+ && Objects.equals(description, that.description)
+ && Objects.equals(resources, that.resources)
+ && Objects.equals(status, that.status)
+ && Objects.equals(usagePolicyId, that.usagePolicyId)
+ && Objects.equals(userApiScopes, that.userApiScopes);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(
+ budgetPolicyId, computeSize, description, resources, status, usagePolicyId, userApiScopes);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(AppUpdate.class)
+ .add("budgetPolicyId", budgetPolicyId)
+ .add("computeSize", computeSize)
+ .add("description", description)
+ .add("resources", resources)
+ .add("status", status)
+ .add("usagePolicyId", usagePolicyId)
+ .add("userApiScopes", userApiScopes)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppUpdateUpdateStatus.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppUpdateUpdateStatus.java
new file mode 100755
index 000000000..d666a314b
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppUpdateUpdateStatus.java
@@ -0,0 +1,58 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.apps;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class AppUpdateUpdateStatus {
+ /** */
+ @JsonProperty("message")
+ private String message;
+
+ /** */
+ @JsonProperty("state")
+ private AppUpdateUpdateStatusUpdateState state;
+
+ public AppUpdateUpdateStatus setMessage(String message) {
+ this.message = message;
+ return this;
+ }
+
+ public String getMessage() {
+ return message;
+ }
+
+ public AppUpdateUpdateStatus setState(AppUpdateUpdateStatusUpdateState state) {
+ this.state = state;
+ return this;
+ }
+
+ public AppUpdateUpdateStatusUpdateState getState() {
+ return state;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ AppUpdateUpdateStatus that = (AppUpdateUpdateStatus) o;
+ return Objects.equals(message, that.message) && Objects.equals(state, that.state);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(message, state);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(AppUpdateUpdateStatus.class)
+ .add("message", message)
+ .add("state", state)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppUpdateUpdateStatusUpdateState.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppUpdateUpdateStatusUpdateState.java
new file mode 100755
index 000000000..d86091f69
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppUpdateUpdateStatusUpdateState.java
@@ -0,0 +1,13 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.apps;
+
+import com.databricks.sdk.support.Generated;
+
+@Generated
+public enum AppUpdateUpdateStatusUpdateState {
+ FAILED,
+ IN_PROGRESS,
+ NOT_UPDATED,
+ SUCCEEDED,
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppsAPI.java
index 1394408c5..dee0d4745 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppsAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppsAPI.java
@@ -81,6 +81,55 @@ public App waitGetAppActive(String name, Duration timeout, Consumer<App> callbac
throw new TimeoutException(String.format("timed out after %s: %s", timeout, statusMessage));
}
+ public AppUpdate waitGetUpdateAppSucceeded(String appName) throws TimeoutException {
+ return waitGetUpdateAppSucceeded(appName, Duration.ofMinutes(20), null);
+ }
+
+ public AppUpdate waitGetUpdateAppSucceeded(
+ String appName, Duration timeout, Consumer<AppUpdate> callback) throws TimeoutException {
+ long deadline = System.currentTimeMillis() + timeout.toMillis();
+ java.util.List<AppUpdateUpdateStatusUpdateState> targetStates =
+ Arrays.asList(AppUpdateUpdateStatusUpdateState.SUCCEEDED);
+ java.util.List<AppUpdateUpdateStatusUpdateState> failureStates =
+ Arrays.asList(AppUpdateUpdateStatusUpdateState.FAILED);
+ String statusMessage = "polling...";
+ int attempt = 1;
+ while (System.currentTimeMillis() < deadline) {
+ AppUpdate poll = getUpdate(new GetAppUpdateRequest().setAppName(appName));
+ // Guard against a missing status before dereferencing it.
+ AppUpdateUpdateStatusUpdateState status =
+ poll.getStatus() != null ? poll.getStatus().getState() : null;
+ statusMessage = String.format("current status: %s", status);
+ if (poll.getStatus() != null && poll.getStatus().getMessage() != null) {
+ statusMessage = poll.getStatus().getMessage();
+ }
+ if (targetStates.contains(status)) {
+ return poll;
+ }
+ if (callback != null) {
+ callback.accept(poll);
+ }
+ if (failureStates.contains(status)) {
+ String msg = String.format("failed to reach SUCCEEDED, got %s: %s", status, statusMessage);
+ throw new IllegalStateException(msg);
+ }
+
+ String prefix = String.format("appName=%s", appName);
+ int sleep = attempt;
+ if (sleep > 10) {
+ // sleep 10s max per attempt
+ sleep = 10;
+ }
+ LOG.info("{}: ({}) {} (sleeping ~{}s)", prefix, status, statusMessage, sleep);
+ try {
+ Thread.sleep((long) (sleep * 1000L + Math.random() * 1000));
+ } catch (InterruptedException e) {
+ Thread.currentThread().interrupt();
+ throw new DatabricksException("Current thread was interrupted", e);
+ }
+ attempt++;
+ }
+ throw new TimeoutException(String.format("timed out after %s: %s", timeout, statusMessage));
+ }
+
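A minimal caller-side sketch of the asynchronous update flow added above, assuming a WorkspaceClient configured from the environment; the app name and description are placeholders, and Wait.get(Duration) blocking until the waiter resolves follows the SDK's usual waiter pattern rather than anything this diff shows directly.

    import com.databricks.sdk.WorkspaceClient;
    import com.databricks.sdk.service.apps.App;
    import com.databricks.sdk.service.apps.AppUpdate;
    import com.databricks.sdk.service.apps.AsyncUpdateAppRequest;
    import com.databricks.sdk.support.Wait;
    import java.time.Duration;
    import java.util.concurrent.TimeoutException;

    public class AppUpdateExample {
      public static void main(String[] args) throws TimeoutException {
        WorkspaceClient w = new WorkspaceClient();

        // Start the update; createUpdate returns immediately with a Wait handle.
        Wait<AppUpdate, AppUpdate> update =
            w.apps()
                .createUpdate(
                    new AsyncUpdateAppRequest()
                        .setAppName("my-app") // placeholder
                        .setApp(new App().setDescription("updated description"))
                        .setUpdateMask("description"));

        // Block until waitGetUpdateAppSucceeded observes SUCCEEDED, FAILED, or a timeout.
        AppUpdate result = update.get(Duration.ofMinutes(20));
        System.out.println(result.getStatus().getState());

        // Or poll manually with the new getUpdate convenience overload.
        AppUpdate current = w.apps().getUpdate("my-app");
        System.out.println(current.getStatus().getMessage());
      }
    }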
public AppDeployment waitGetDeploymentAppSucceeded(String appName, String deploymentId)
throws TimeoutException {
return waitGetDeploymentAppSucceeded(appName, deploymentId, Duration.ofMinutes(20), null);
@@ -186,6 +235,17 @@ public Wait<App, App> create(CreateAppRequest request) {
(timeout, callback) -> waitGetAppActive(response.getName(), timeout, callback), response);
}
+ /**
+ * Creates an app update and starts the update process. The update process is asynchronous and the
+ * status of the update can be checked with the GetAppUpdate method.
+ */
+ public Wait<AppUpdate, AppUpdate> createUpdate(AsyncUpdateAppRequest request) {
+ AppUpdate response = impl.createUpdate(request);
+ return new Wait<>(
+ (timeout, callback) -> waitGetUpdateAppSucceeded(request.getAppName(), timeout, callback),
+ response);
+ }
+
public App delete(String name) {
return delete(new DeleteAppRequest().setName(name));
}
@@ -242,6 +302,15 @@ public AppPermissions getPermissions(GetAppPermissionsRequest request) {
return impl.getPermissions(request);
}
+ public AppUpdate getUpdate(String appName) {
+ return getUpdate(new GetAppUpdateRequest().setAppName(appName));
+ }
+
+ /** Gets the status of an app update. */
+ public AppUpdate getUpdate(GetAppUpdateRequest request) {
+ return impl.getUpdate(request);
+ }
+
/** Lists all apps in the workspace. */
public Iterable<App> list(ListAppsRequest request) {
return new Paginator<>(
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppsImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppsImpl.java
index 0e6ec9ff6..047d71e54 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppsImpl.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppsImpl.java
@@ -30,6 +30,20 @@ public App create(CreateAppRequest request) {
}
}
+ @Override
+ public AppUpdate createUpdate(AsyncUpdateAppRequest request) {
+ String path = String.format("/api/2.0/apps/%s/update", request.getAppName());
+ try {
+ Request req = new Request("POST", path, apiClient.serialize(request));
+ ApiClient.setQuery(req, request);
+ req.withHeader("Accept", "application/json");
+ req.withHeader("Content-Type", "application/json");
+ return apiClient.execute(req, AppUpdate.class);
+ } catch (IOException e) {
+ throw new DatabricksException("IO error: " + e.getMessage(), e);
+ }
+ }
+
@Override
public App delete(DeleteAppRequest request) {
String path = String.format("/api/2.0/apps/%s", request.getName());
@@ -112,6 +126,19 @@ public AppPermissions getPermissions(GetAppPermissionsRequest request) {
}
}
+ @Override
+ public AppUpdate getUpdate(GetAppUpdateRequest request) {
+ String path = String.format("/api/2.0/apps/%s/update", request.getAppName());
+ try {
+ Request req = new Request("GET", path);
+ ApiClient.setQuery(req, request);
+ req.withHeader("Accept", "application/json");
+ return apiClient.execute(req, AppUpdate.class);
+ } catch (IOException e) {
+ throw new DatabricksException("IO error: " + e.getMessage(), e);
+ }
+ }
+
@Override
public ListAppsResponse list(ListAppsRequest request) {
String path = "/api/2.0/apps";
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppsService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppsService.java
index 9e5b895bf..31742387d 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppsService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AppsService.java
@@ -16,6 +16,12 @@ public interface AppsService {
/** Creates a new app. */
App create(CreateAppRequest createAppRequest);
+ /**
+ * Creates an app update and starts the update process. The update process is asynchronous and the
+ * status of the update can be checked with the GetAppUpdate method.
+ */
+ AppUpdate createUpdate(AsyncUpdateAppRequest asyncUpdateAppRequest);
+
/** Deletes an app. */
App delete(DeleteAppRequest deleteAppRequest);
@@ -35,6 +41,9 @@ GetAppPermissionLevelsResponse getPermissionLevels(
/** Gets the permissions of an app. Apps can inherit permissions from their root object. */
AppPermissions getPermissions(GetAppPermissionsRequest getAppPermissionsRequest);
+ /** Gets the status of an app update. */
+ AppUpdate getUpdate(GetAppUpdateRequest getAppUpdateRequest);
+
/** Lists all apps in the workspace. */
ListAppsResponse list(ListAppsRequest listAppsRequest);
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AsyncUpdateAppRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AsyncUpdateAppRequest.java
new file mode 100755
index 000000000..136f919eb
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/AsyncUpdateAppRequest.java
@@ -0,0 +1,84 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.apps;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class AsyncUpdateAppRequest {
+ /** */
+ @JsonProperty("app")
+ private App app;
+
+ /** */
+ @JsonIgnore private String appName;
+
+ /**
+ * The field mask must be a single string, with multiple fields separated by commas (no spaces).
+ * The field path is relative to the resource object, using a dot (`.`) to navigate sub-fields
+ * (e.g., `author.given_name`). Specification of elements in sequence or map fields is not
+ * allowed, as only the entire collection field can be specified. Field names must exactly match
+ * the resource field names.
+ *
+ * A field mask of `*` indicates full replacement. It’s recommended to always explicitly list
+ * the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if
+ * the API changes in the future.
+ */
+ @JsonProperty("update_mask")
+ private String updateMask;
+
+ public AsyncUpdateAppRequest setApp(App app) {
+ this.app = app;
+ return this;
+ }
+
+ public App getApp() {
+ return app;
+ }
+
+ public AsyncUpdateAppRequest setAppName(String appName) {
+ this.appName = appName;
+ return this;
+ }
+
+ public String getAppName() {
+ return appName;
+ }
+
+ public AsyncUpdateAppRequest setUpdateMask(String updateMask) {
+ this.updateMask = updateMask;
+ return this;
+ }
+
+ public String getUpdateMask() {
+ return updateMask;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ AsyncUpdateAppRequest that = (AsyncUpdateAppRequest) o;
+ return Objects.equals(app, that.app)
+ && Objects.equals(appName, that.appName)
+ && Objects.equals(updateMask, that.updateMask);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(app, appName, updateMask);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(AsyncUpdateAppRequest.class)
+ .add("app", app)
+ .add("appName", appName)
+ .add("updateMask", updateMask)
+ .toString();
+ }
+}
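A short sketch of the update_mask semantics documented above: fields are comma-separated with no spaces, dotted paths address sub-fields, and `*` means full replacement. The App field names below are illustrative assumptions, not an exhaustive list of updatable fields.

    import com.databricks.sdk.service.apps.App;
    import com.databricks.sdk.service.apps.AsyncUpdateAppRequest;

    public class UpdateMaskExample {
      public static void main(String[] args) {
        App app = new App().setDescription("new text").setBudgetPolicyId("policy-123");

        // Update two top-level fields in one request: comma-separated, no spaces.
        AsyncUpdateAppRequest narrow =
            new AsyncUpdateAppRequest()
                .setAppName("my-app")
                .setApp(app)
                .setUpdateMask("description,budget_policy_id");

        // Discouraged: `*` replaces the entire resource and can clobber fields
        // added by future API versions.
        AsyncUpdateAppRequest full =
            new AsyncUpdateAppRequest().setAppName("my-app").setApp(app).setUpdateMask("*");

        System.out.println(narrow + "\n" + full);
      }
    }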
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/ComputeSize.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/ComputeSize.java
new file mode 100755
index 000000000..ff5b63350
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/ComputeSize.java
@@ -0,0 +1,12 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.apps;
+
+import com.databricks.sdk.support.Generated;
+
+@Generated
+public enum ComputeSize {
+ LARGE,
+ LIQUID,
+ MEDIUM,
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/GetAppUpdateRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/GetAppUpdateRequest.java
new file mode 100755
index 000000000..152df04b8
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/apps/GetAppUpdateRequest.java
@@ -0,0 +1,41 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.apps;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import java.util.Objects;
+
+@Generated
+public class GetAppUpdateRequest {
+ /** The name of the app. */
+ @JsonIgnore private String appName;
+
+ public GetAppUpdateRequest setAppName(String appName) {
+ this.appName = appName;
+ return this;
+ }
+
+ public String getAppName() {
+ return appName;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ GetAppUpdateRequest that = (GetAppUpdateRequest) o;
+ return Objects.equals(appName, that.appName);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(appName);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(GetAppUpdateRequest.class).add("appName", appName).toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoreAssignmentsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoreAssignmentsAPI.java
index 61feaf2f3..6e1cd7c9e 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoreAssignmentsAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoreAssignmentsAPI.java
@@ -25,20 +25,22 @@ public AccountMetastoreAssignmentsAPI(AccountMetastoreAssignmentsService mock) {
}
/** Creates an assignment to a metastore for a workspace */
- public void create(AccountsCreateMetastoreAssignment request) {
- impl.create(request);
+ public AccountsCreateMetastoreAssignmentResponse create(
+ AccountsCreateMetastoreAssignment request) {
+ return impl.create(request);
}
- public void delete(long workspaceId, String metastoreId) {
- delete(
+ public AccountsDeleteMetastoreAssignmentResponse delete(long workspaceId, String metastoreId) {
+ return delete(
new DeleteAccountMetastoreAssignmentRequest()
.setWorkspaceId(workspaceId)
.setMetastoreId(metastoreId));
}
/** Deletes a metastore assignment to a workspace, leaving the workspace with no metastore. */
- public void delete(DeleteAccountMetastoreAssignmentRequest request) {
- impl.delete(request);
+ public AccountsDeleteMetastoreAssignmentResponse delete(
+ DeleteAccountMetastoreAssignmentRequest request) {
+ return impl.delete(request);
}
public AccountsMetastoreAssignment get(long workspaceId) {
@@ -47,7 +49,7 @@ public AccountsMetastoreAssignment get(long workspaceId) {
/**
* Gets the metastore assignment, if any, for the workspace specified by ID. If the workspace is
- * assigned a metastore, the mappig will be returned. If no metastore is assigned to the
+ * assigned a metastore, the mapping will be returned. If no metastore is assigned to the
* workspace, the assignment will not be found and a 404 returned.
*/
public AccountsMetastoreAssignment get(GetAccountMetastoreAssignmentRequest request) {
@@ -71,8 +73,9 @@ public Iterable<Long> list(ListAccountMetastoreAssignmentsRequest request) {
* Updates an assignment to a metastore for a workspace. Currently, only the default catalog may
* be updated.
*/
- public void update(AccountsUpdateMetastoreAssignment request) {
- impl.update(request);
+ public AccountsUpdateMetastoreAssignmentResponse update(
+ AccountsUpdateMetastoreAssignment request) {
+ return impl.update(request);
}
public AccountMetastoreAssignmentsService impl() {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoreAssignmentsImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoreAssignmentsImpl.java
index f0418c91e..d9696ba4d 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoreAssignmentsImpl.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoreAssignmentsImpl.java
@@ -17,7 +17,8 @@ public AccountMetastoreAssignmentsImpl(ApiClient apiClient) {
}
@Override
- public void create(AccountsCreateMetastoreAssignment request) {
+ public AccountsCreateMetastoreAssignmentResponse create(
+ AccountsCreateMetastoreAssignment request) {
String path =
String.format(
"/api/2.0/accounts/%s/workspaces/%s/metastores/%s",
@@ -27,14 +28,15 @@ public void create(AccountsCreateMetastoreAssignment request) {
ApiClient.setQuery(req, request);
req.withHeader("Accept", "application/json");
req.withHeader("Content-Type", "application/json");
- apiClient.execute(req, Void.class);
+ return apiClient.execute(req, AccountsCreateMetastoreAssignmentResponse.class);
} catch (IOException e) {
throw new DatabricksException("IO error: " + e.getMessage(), e);
}
}
@Override
- public void delete(DeleteAccountMetastoreAssignmentRequest request) {
+ public AccountsDeleteMetastoreAssignmentResponse delete(
+ DeleteAccountMetastoreAssignmentRequest request) {
String path =
String.format(
"/api/2.0/accounts/%s/workspaces/%s/metastores/%s",
@@ -43,7 +45,7 @@ public void delete(DeleteAccountMetastoreAssignmentRequest request) {
Request req = new Request("DELETE", path);
ApiClient.setQuery(req, request);
req.withHeader("Accept", "application/json");
- apiClient.execute(req, Void.class);
+ return apiClient.execute(req, AccountsDeleteMetastoreAssignmentResponse.class);
} catch (IOException e) {
throw new DatabricksException("IO error: " + e.getMessage(), e);
}
@@ -83,7 +85,8 @@ public ListAccountMetastoreAssignmentsResponse list(
}
@Override
- public void update(AccountsUpdateMetastoreAssignment request) {
+ public AccountsUpdateMetastoreAssignmentResponse update(
+ AccountsUpdateMetastoreAssignment request) {
String path =
String.format(
"/api/2.0/accounts/%s/workspaces/%s/metastores/%s",
@@ -93,7 +96,7 @@ public void update(AccountsUpdateMetastoreAssignment request) {
ApiClient.setQuery(req, request);
req.withHeader("Accept", "application/json");
req.withHeader("Content-Type", "application/json");
- apiClient.execute(req, Void.class);
+ return apiClient.execute(req, AccountsUpdateMetastoreAssignmentResponse.class);
} catch (IOException e) {
throw new DatabricksException("IO error: " + e.getMessage(), e);
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoreAssignmentsService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoreAssignmentsService.java
index e1b89bd09..6ad225224 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoreAssignmentsService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoreAssignmentsService.java
@@ -13,14 +13,16 @@
@Generated
public interface AccountMetastoreAssignmentsService {
/** Creates an assignment to a metastore for a workspace */
- void create(AccountsCreateMetastoreAssignment accountsCreateMetastoreAssignment);
+ AccountsCreateMetastoreAssignmentResponse create(
+ AccountsCreateMetastoreAssignment accountsCreateMetastoreAssignment);
/** Deletes a metastore assignment to a workspace, leaving the workspace with no metastore. */
- void delete(DeleteAccountMetastoreAssignmentRequest deleteAccountMetastoreAssignmentRequest);
+ AccountsDeleteMetastoreAssignmentResponse delete(
+ DeleteAccountMetastoreAssignmentRequest deleteAccountMetastoreAssignmentRequest);
/**
* Gets the metastore assignment, if any, for the workspace specified by ID. If the workspace is
- * assigned a metastore, the mappig will be returned. If no metastore is assigned to the
+ * assigned a metastore, the mapping will be returned. If no metastore is assigned to the
* workspace, the assignment will not be found and a 404 returned.
*/
AccountsMetastoreAssignment get(
@@ -34,5 +36,6 @@ ListAccountMetastoreAssignmentsResponse list(
* Updates an assignment to a metastore for a workspace. Currently, only the default catalog may
* be updated.
*/
- void update(AccountsUpdateMetastoreAssignment accountsUpdateMetastoreAssignment);
+ AccountsUpdateMetastoreAssignmentResponse update(
+ AccountsUpdateMetastoreAssignment accountsUpdateMetastoreAssignment);
}
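With this change, the assignment endpoints return (empty) acknowledgement objects instead of void. A hedged sketch, assuming an AccountClient configured for the account, its metastoreAssignments() accessor, and placeholder IDs:

    import com.databricks.sdk.AccountClient;
    import com.databricks.sdk.service.catalog.AccountsCreateMetastoreAssignment;
    import com.databricks.sdk.service.catalog.AccountsCreateMetastoreAssignmentResponse;
    import com.databricks.sdk.service.catalog.CreateMetastoreAssignment;

    public class MetastoreAssignmentExample {
      public static void main(String[] args) {
        AccountClient a = new AccountClient();
        // create(...) now returns an acknowledgement object instead of void.
        AccountsCreateMetastoreAssignmentResponse ack =
            a.metastoreAssignments()
                .create(
                    new AccountsCreateMetastoreAssignment()
                        .setWorkspaceId(1234567890L) // placeholder workspace ID
                        .setMetastoreId("metastore-id")
                        .setMetastoreAssignment(
                            new CreateMetastoreAssignment().setMetastoreId("metastore-id")));
        System.out.println(ack);
      }
    }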
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoresAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoresAPI.java
index 601b17d68..52fd325f0 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoresAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoresAPI.java
@@ -28,36 +28,39 @@ public AccountMetastoresAPI(AccountMetastoresService mock) {
}
/** Creates a Unity Catalog metastore. */
- public AccountsMetastoreInfo create(AccountsCreateMetastore request) {
+ public AccountsCreateMetastoreResponse create(AccountsCreateMetastore request) {
return impl.create(request);
}
- public void delete(String metastoreId) {
- delete(new DeleteAccountMetastoreRequest().setMetastoreId(metastoreId));
+ public AccountsDeleteMetastoreResponse delete(String metastoreId) {
+ return delete(new DeleteAccountMetastoreRequest().setMetastoreId(metastoreId));
}
/** Deletes a Unity Catalog metastore for an account, both specified by ID. */
- public void delete(DeleteAccountMetastoreRequest request) {
- impl.delete(request);
+ public AccountsDeleteMetastoreResponse delete(DeleteAccountMetastoreRequest request) {
+ return impl.delete(request);
}
- public AccountsMetastoreInfo get(String metastoreId) {
+ public AccountsGetMetastoreResponse get(String metastoreId) {
return get(new GetAccountMetastoreRequest().setMetastoreId(metastoreId));
}
/** Gets a Unity Catalog metastore from an account, both specified by ID. */
- public AccountsMetastoreInfo get(GetAccountMetastoreRequest request) {
+ public AccountsGetMetastoreResponse get(GetAccountMetastoreRequest request) {
return impl.get(request);
}
/** Gets all Unity Catalog metastores associated with an account specified by ID. */
public Iterable<MetastoreInfo> list() {
return new Paginator<>(
- null, (Void v) -> impl.list(), ListMetastoresResponse::getMetastores, response -> null);
+ null,
+ (Void v) -> impl.list(),
+ AccountsListMetastoresResponse::getMetastores,
+ response -> null);
}
/** Updates an existing Unity Catalog metastore. */
- public AccountsMetastoreInfo update(AccountsUpdateMetastore request) {
+ public AccountsUpdateMetastoreResponse update(AccountsUpdateMetastore request) {
return impl.update(request);
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoresImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoresImpl.java
index 56328c4f0..e983809d0 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoresImpl.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoresImpl.java
@@ -17,21 +17,21 @@ public AccountMetastoresImpl(ApiClient apiClient) {
}
@Override
- public AccountsMetastoreInfo create(AccountsCreateMetastore request) {
+ public AccountsCreateMetastoreResponse create(AccountsCreateMetastore request) {
String path = String.format("/api/2.0/accounts/%s/metastores", apiClient.configuredAccountID());
try {
Request req = new Request("POST", path, apiClient.serialize(request));
ApiClient.setQuery(req, request);
req.withHeader("Accept", "application/json");
req.withHeader("Content-Type", "application/json");
- return apiClient.execute(req, AccountsMetastoreInfo.class);
+ return apiClient.execute(req, AccountsCreateMetastoreResponse.class);
} catch (IOException e) {
throw new DatabricksException("IO error: " + e.getMessage(), e);
}
}
@Override
- public void delete(DeleteAccountMetastoreRequest request) {
+ public AccountsDeleteMetastoreResponse delete(DeleteAccountMetastoreRequest request) {
String path =
String.format(
"/api/2.0/accounts/%s/metastores/%s",
@@ -40,14 +40,14 @@ public void delete(DeleteAccountMetastoreRequest request) {
Request req = new Request("DELETE", path);
ApiClient.setQuery(req, request);
req.withHeader("Accept", "application/json");
- apiClient.execute(req, Void.class);
+ return apiClient.execute(req, AccountsDeleteMetastoreResponse.class);
} catch (IOException e) {
throw new DatabricksException("IO error: " + e.getMessage(), e);
}
}
@Override
- public AccountsMetastoreInfo get(GetAccountMetastoreRequest request) {
+ public AccountsGetMetastoreResponse get(GetAccountMetastoreRequest request) {
String path =
String.format(
"/api/2.0/accounts/%s/metastores/%s",
@@ -56,26 +56,26 @@ public AccountsMetastoreInfo get(GetAccountMetastoreRequest request) {
Request req = new Request("GET", path);
ApiClient.setQuery(req, request);
req.withHeader("Accept", "application/json");
- return apiClient.execute(req, AccountsMetastoreInfo.class);
+ return apiClient.execute(req, AccountsGetMetastoreResponse.class);
} catch (IOException e) {
throw new DatabricksException("IO error: " + e.getMessage(), e);
}
}
@Override
- public ListMetastoresResponse list() {
+ public AccountsListMetastoresResponse list() {
String path = String.format("/api/2.0/accounts/%s/metastores", apiClient.configuredAccountID());
try {
Request req = new Request("GET", path);
req.withHeader("Accept", "application/json");
- return apiClient.execute(req, ListMetastoresResponse.class);
+ return apiClient.execute(req, AccountsListMetastoresResponse.class);
} catch (IOException e) {
throw new DatabricksException("IO error: " + e.getMessage(), e);
}
}
@Override
- public AccountsMetastoreInfo update(AccountsUpdateMetastore request) {
+ public AccountsUpdateMetastoreResponse update(AccountsUpdateMetastore request) {
String path =
String.format(
"/api/2.0/accounts/%s/metastores/%s",
@@ -85,7 +85,7 @@ public AccountsMetastoreInfo update(AccountsUpdateMetastore request) {
ApiClient.setQuery(req, request);
req.withHeader("Accept", "application/json");
req.withHeader("Content-Type", "application/json");
- return apiClient.execute(req, AccountsMetastoreInfo.class);
+ return apiClient.execute(req, AccountsUpdateMetastoreResponse.class);
} catch (IOException e) {
throw new DatabricksException("IO error: " + e.getMessage(), e);
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoresService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoresService.java
index 07e71499f..1ec720114 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoresService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountMetastoresService.java
@@ -14,17 +14,18 @@
@Generated
public interface AccountMetastoresService {
/** Creates a Unity Catalog metastore. */
- AccountsMetastoreInfo create(AccountsCreateMetastore accountsCreateMetastore);
+ AccountsCreateMetastoreResponse create(AccountsCreateMetastore accountsCreateMetastore);
/** Deletes a Unity Catalog metastore for an account, both specified by ID. */
- void delete(DeleteAccountMetastoreRequest deleteAccountMetastoreRequest);
+ AccountsDeleteMetastoreResponse delete(
+ DeleteAccountMetastoreRequest deleteAccountMetastoreRequest);
/** Gets a Unity Catalog metastore from an account, both specified by ID. */
- AccountsMetastoreInfo get(GetAccountMetastoreRequest getAccountMetastoreRequest);
+ AccountsGetMetastoreResponse get(GetAccountMetastoreRequest getAccountMetastoreRequest);
/** Gets all Unity Catalog metastores associated with an account specified by ID. */
- ListMetastoresResponse list();
+ AccountsListMetastoresResponse list();
/** Updates an existing Unity Catalog metastore. */
- AccountsMetastoreInfo update(AccountsUpdateMetastore accountsUpdateMetastore);
+ AccountsUpdateMetastoreResponse update(AccountsUpdateMetastore accountsUpdateMetastore);
}
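The account-level metastore calls now return operation-specific response wrappers in place of the shared AccountsMetastoreInfo, so call sites unwrap the payload from the new types. A minimal sketch under the same AccountClient assumption, with a placeholder metastore ID:

    import com.databricks.sdk.AccountClient;
    import com.databricks.sdk.service.catalog.AccountsDeleteMetastoreResponse;
    import com.databricks.sdk.service.catalog.AccountsGetMetastoreResponse;
    import com.databricks.sdk.service.catalog.MetastoreInfo;

    public class AccountMetastoresExample {
      public static void main(String[] args) {
        AccountClient a = new AccountClient();

        // get(...) now returns a wrapper; unwrap the MetastoreInfo payload.
        AccountsGetMetastoreResponse got = a.metastores().get("metastore-id");
        MetastoreInfo info = got.getMetastoreInfo();
        System.out.println(info.getName());

        // Iteration over all metastores is unchanged at the call site.
        for (MetastoreInfo m : a.metastores().list()) {
          System.out.println(m.getMetastoreId());
        }

        // delete(...) now returns a response object rather than void.
        AccountsDeleteMetastoreResponse deleted = a.metastores().delete("metastore-id");
        System.out.println(deleted);
      }
    }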
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountStorageCredentialsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountStorageCredentialsAPI.java
index b6ff09529..254dc846d 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountStorageCredentialsAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountStorageCredentialsAPI.java
@@ -25,20 +25,20 @@ public AccountStorageCredentialsAPI(AccountStorageCredentialsService mock) {
}
/**
- * Creates a new storage credential. The request object is specific to the cloud:
+ * Creates a new storage credential. The request object is specific to the cloud: - **AwsIamRole**
+ * for AWS credentials - **AzureServicePrincipal** for Azure credentials -
+ * **GcpServiceAccountKey** for GCP credentials
*
- * * **AwsIamRole** for AWS credentials * **AzureServicePrincipal** for Azure credentials *
- * **GcpServiceAcountKey** for GCP credentials.
- *
- * <p>The caller must be a metastore admin and have the **CREATE_STORAGE_CREDENTIAL** privilege on
+ * <p>The caller must be a metastore admin and have the `CREATE_STORAGE_CREDENTIAL` privilege on
* the metastore.
*/
- public AccountsStorageCredentialInfo create(AccountsCreateStorageCredential request) {
+ public AccountsCreateStorageCredentialInfo create(AccountsCreateStorageCredential request) {
return impl.create(request);
}
- public void delete(String metastoreId, String storageCredentialName) {
- delete(
+ public AccountsDeleteStorageCredentialResponse delete(
+ String metastoreId, String storageCredentialName) {
+ return delete(
new DeleteAccountStorageCredentialRequest()
.setMetastoreId(metastoreId)
.setStorageCredentialName(storageCredentialName));
@@ -48,8 +48,9 @@ public void delete(String metastoreId, String storageCredentialName) {
* Deletes a storage credential from the metastore. The caller must be an owner of the storage
* credential.
*/
- public void delete(DeleteAccountStorageCredentialRequest request) {
- impl.delete(request);
+ public AccountsDeleteStorageCredentialResponse delete(
+ DeleteAccountStorageCredentialRequest request) {
+ return impl.delete(request);
}
public AccountsStorageCredentialInfo get(String metastoreId, String storageCredentialName) {
@@ -82,9 +83,9 @@ public Iterable<StorageCredentialInfo> list(ListAccountStorageCredentialsRequest
/**
* Updates a storage credential on the metastore. The caller must be the owner of the storage
- * credential. If the caller is a metastore admin, only the __owner__ credential can be changed.
+ * credential. If the caller is a metastore admin, only the **owner** credential can be changed.
*/
- public AccountsStorageCredentialInfo update(AccountsUpdateStorageCredential request) {
+ public AccountsUpdateStorageCredentialResponse update(AccountsUpdateStorageCredential request) {
return impl.update(request);
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountStorageCredentialsImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountStorageCredentialsImpl.java
index 5eb10df59..26ed48604 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountStorageCredentialsImpl.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountStorageCredentialsImpl.java
@@ -17,7 +17,7 @@ public AccountStorageCredentialsImpl(ApiClient apiClient) {
}
@Override
- public AccountsStorageCredentialInfo create(AccountsCreateStorageCredential request) {
+ public AccountsCreateStorageCredentialInfo create(AccountsCreateStorageCredential request) {
String path =
String.format(
"/api/2.0/accounts/%s/metastores/%s/storage-credentials",
@@ -27,14 +27,15 @@ public AccountsStorageCredentialInfo create(AccountsCreateStorageCredential requ
ApiClient.setQuery(req, request);
req.withHeader("Accept", "application/json");
req.withHeader("Content-Type", "application/json");
- return apiClient.execute(req, AccountsStorageCredentialInfo.class);
+ return apiClient.execute(req, AccountsCreateStorageCredentialInfo.class);
} catch (IOException e) {
throw new DatabricksException("IO error: " + e.getMessage(), e);
}
}
@Override
- public void delete(DeleteAccountStorageCredentialRequest request) {
+ public AccountsDeleteStorageCredentialResponse delete(
+ DeleteAccountStorageCredentialRequest request) {
String path =
String.format(
"/api/2.0/accounts/%s/metastores/%s/storage-credentials/%s",
@@ -45,7 +46,7 @@ public void delete(DeleteAccountStorageCredentialRequest request) {
Request req = new Request("DELETE", path);
ApiClient.setQuery(req, request);
req.withHeader("Accept", "application/json");
- apiClient.execute(req, Void.class);
+ return apiClient.execute(req, AccountsDeleteStorageCredentialResponse.class);
} catch (IOException e) {
throw new DatabricksException("IO error: " + e.getMessage(), e);
}
@@ -86,7 +87,7 @@ public ListAccountStorageCredentialsResponse list(ListAccountStorageCredentialsR
}
@Override
- public AccountsStorageCredentialInfo update(AccountsUpdateStorageCredential request) {
+ public AccountsUpdateStorageCredentialResponse update(AccountsUpdateStorageCredential request) {
String path =
String.format(
"/api/2.0/accounts/%s/metastores/%s/storage-credentials/%s",
@@ -98,7 +99,7 @@ public AccountsStorageCredentialInfo update(AccountsUpdateStorageCredential requ
ApiClient.setQuery(req, request);
req.withHeader("Accept", "application/json");
req.withHeader("Content-Type", "application/json");
- return apiClient.execute(req, AccountsStorageCredentialInfo.class);
+ return apiClient.execute(req, AccountsUpdateStorageCredentialResponse.class);
} catch (IOException e) {
throw new DatabricksException("IO error: " + e.getMessage(), e);
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountStorageCredentialsService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountStorageCredentialsService.java
index 2cceee11c..5c537dacb 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountStorageCredentialsService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountStorageCredentialsService.java
@@ -13,22 +13,22 @@
@Generated
public interface AccountStorageCredentialsService {
/**
- * Creates a new storage credential. The request object is specific to the cloud:
+ * Creates a new storage credential. The request object is specific to the cloud: - **AwsIamRole**
+ * for AWS credentials - **AzureServicePrincipal** for Azure credentials -
+ * **GcpServiceAccountKey** for GCP credentials
*
- * * **AwsIamRole** for AWS credentials * **AzureServicePrincipal** for Azure credentials *
- * **GcpServiceAcountKey** for GCP credentials.
- *
- * <p>The caller must be a metastore admin and have the **CREATE_STORAGE_CREDENTIAL** privilege on
+ * <p>The caller must be a metastore admin and have the `CREATE_STORAGE_CREDENTIAL` privilege on
* the metastore.
*/
- AccountsStorageCredentialInfo create(
+ AccountsCreateStorageCredentialInfo create(
AccountsCreateStorageCredential accountsCreateStorageCredential);
/**
* Deletes a storage credential from the metastore. The caller must be an owner of the storage
* credential.
*/
- void delete(DeleteAccountStorageCredentialRequest deleteAccountStorageCredentialRequest);
+ AccountsDeleteStorageCredentialResponse delete(
+ DeleteAccountStorageCredentialRequest deleteAccountStorageCredentialRequest);
/**
* Gets a storage credential from the metastore. The caller must be a metastore admin, the owner
@@ -43,8 +43,8 @@ ListAccountStorageCredentialsResponse list(
/**
* Updates a storage credential on the metastore. The caller must be the owner of the storage
- * credential. If the caller is a metastore admin, only the __owner__ credential can be changed.
+ * credential. If the caller is a metastore admin, only the **owner** credential can be changed.
*/
- AccountsStorageCredentialInfo update(
+ AccountsUpdateStorageCredentialResponse update(
AccountsUpdateStorageCredential accountsUpdateStorageCredential);
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastore.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastore.java
index b297cbf98..e86dbfa1e 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastore.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastore.java
@@ -7,18 +7,19 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
+/** Properties of the new metastore. */
@Generated
public class AccountsCreateMetastore {
/** */
@JsonProperty("metastore_info")
- private CreateMetastore metastoreInfo;
+ private CreateAccountsMetastore metastoreInfo;
- public AccountsCreateMetastore setMetastoreInfo(CreateMetastore metastoreInfo) {
+ public AccountsCreateMetastore setMetastoreInfo(CreateAccountsMetastore metastoreInfo) {
this.metastoreInfo = metastoreInfo;
return this;
}
- public CreateMetastore getMetastoreInfo() {
+ public CreateAccountsMetastore getMetastoreInfo() {
return metastoreInfo;
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastoreAssignment.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastoreAssignment.java
index fa3e7a1e7..5ce5863fe 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastoreAssignment.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastoreAssignment.java
@@ -8,6 +8,7 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
+/** The mapping from workspace to metastore. */
@Generated
public class AccountsCreateMetastoreAssignment {
/** */
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastoreAssignmentResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastoreAssignmentResponse.java
new file mode 100755
index 000000000..d8b003eaf
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastoreAssignmentResponse.java
@@ -0,0 +1,29 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import java.util.Objects;
+
+/** The metastore assignment was successfully created. */
+@Generated
+public class AccountsCreateMetastoreAssignmentResponse {
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash();
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(AccountsCreateMetastoreAssignmentResponse.class).toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsMetastoreInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastoreResponse.java
similarity index 76%
rename from databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsMetastoreInfo.java
rename to databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastoreResponse.java
index 249aeb544..b6848ebe1 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsMetastoreInfo.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateMetastoreResponse.java
@@ -8,12 +8,12 @@
import java.util.Objects;
@Generated
-public class AccountsMetastoreInfo {
+public class AccountsCreateMetastoreResponse {
/** */
@JsonProperty("metastore_info")
private MetastoreInfo metastoreInfo;
- public AccountsMetastoreInfo setMetastoreInfo(MetastoreInfo metastoreInfo) {
+ public AccountsCreateMetastoreResponse setMetastoreInfo(MetastoreInfo metastoreInfo) {
this.metastoreInfo = metastoreInfo;
return this;
}
@@ -26,7 +26,7 @@ public MetastoreInfo getMetastoreInfo() {
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
- AccountsMetastoreInfo that = (AccountsMetastoreInfo) o;
+ AccountsCreateMetastoreResponse that = (AccountsCreateMetastoreResponse) o;
return Objects.equals(metastoreInfo, that.metastoreInfo);
}
@@ -37,7 +37,7 @@ public int hashCode() {
@Override
public String toString() {
- return new ToStringer(AccountsMetastoreInfo.class)
+ return new ToStringer(AccountsCreateMetastoreResponse.class)
.add("metastoreInfo", metastoreInfo)
.toString();
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateStorageCredential.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateStorageCredential.java
index c1c33ea9f..a19caa490 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateStorageCredential.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateStorageCredential.java
@@ -12,17 +12,25 @@
public class AccountsCreateStorageCredential {
/** */
@JsonProperty("credential_info")
- private CreateStorageCredential credentialInfo;
+ private CreateAccountsStorageCredential credentialInfo;
/** Unity Catalog metastore ID */
@JsonIgnore private String metastoreId;
- public AccountsCreateStorageCredential setCredentialInfo(CreateStorageCredential credentialInfo) {
+ /**
+ * Optional, default false. Supplying true to this argument skips validation of the created set of
+ * credentials.
+ */
+ @JsonProperty("skip_validation")
+ private Boolean skipValidation;
+
+ public AccountsCreateStorageCredential setCredentialInfo(
+ CreateAccountsStorageCredential credentialInfo) {
this.credentialInfo = credentialInfo;
return this;
}
- public CreateStorageCredential getCredentialInfo() {
+ public CreateAccountsStorageCredential getCredentialInfo() {
return credentialInfo;
}
@@ -35,18 +43,28 @@ public String getMetastoreId() {
return metastoreId;
}
+ public AccountsCreateStorageCredential setSkipValidation(Boolean skipValidation) {
+ this.skipValidation = skipValidation;
+ return this;
+ }
+
+ public Boolean getSkipValidation() {
+ return skipValidation;
+ }
+
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
AccountsCreateStorageCredential that = (AccountsCreateStorageCredential) o;
return Objects.equals(credentialInfo, that.credentialInfo)
- && Objects.equals(metastoreId, that.metastoreId);
+ && Objects.equals(metastoreId, that.metastoreId)
+ && Objects.equals(skipValidation, that.skipValidation);
}
@Override
public int hashCode() {
- return Objects.hash(credentialInfo, metastoreId);
+ return Objects.hash(credentialInfo, metastoreId, skipValidation);
}
@Override
@@ -54,6 +72,7 @@ public String toString() {
return new ToStringer(AccountsCreateStorageCredential.class)
.add("credentialInfo", credentialInfo)
.add("metastoreId", metastoreId)
+ .add("skipValidation", skipValidation)
.toString();
}
}
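A minimal sketch of the new skip_validation flag on account-level storage credential creation, assuming an AccountClient, its storageCredentials() accessor, and placeholder identifiers; the cloud-specific fields of CreateAccountsStorageCredential (a type introduced alongside this change) are left as a comment rather than guessed at.

    import com.databricks.sdk.AccountClient;
    import com.databricks.sdk.service.catalog.AccountsCreateStorageCredential;
    import com.databricks.sdk.service.catalog.AccountsCreateStorageCredentialInfo;
    import com.databricks.sdk.service.catalog.CreateAccountsStorageCredential;

    public class SkipValidationExample {
      public static void main(String[] args) {
        AccountClient a = new AccountClient();
        AccountsCreateStorageCredentialInfo created =
            a.storageCredentials()
                .create(
                    new AccountsCreateStorageCredential()
                        .setMetastoreId("metastore-id")
                        // Optional, default false: skip validating the new credentials.
                        .setSkipValidation(true)
                        .setCredentialInfo(
                            new CreateAccountsStorageCredential()
                            // populate exactly one cloud-specific block here:
                            // AwsIamRole, AzureServicePrincipal, or GcpServiceAccountKey
                            ));
        System.out.println(created.getCredentialInfo());
      }
    }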
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateStorageCredentialInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateStorageCredentialInfo.java
new file mode 100755
index 000000000..420d976e3
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsCreateStorageCredentialInfo.java
@@ -0,0 +1,45 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class AccountsCreateStorageCredentialInfo {
+ /** */
+ @JsonProperty("credential_info")
+ private StorageCredentialInfo credentialInfo;
+
+ public AccountsCreateStorageCredentialInfo setCredentialInfo(
+ StorageCredentialInfo credentialInfo) {
+ this.credentialInfo = credentialInfo;
+ return this;
+ }
+
+ public StorageCredentialInfo getCredentialInfo() {
+ return credentialInfo;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ AccountsCreateStorageCredentialInfo that = (AccountsCreateStorageCredentialInfo) o;
+ return Objects.equals(credentialInfo, that.credentialInfo);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(credentialInfo);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(AccountsCreateStorageCredentialInfo.class)
+ .add("credentialInfo", credentialInfo)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsDeleteMetastoreAssignmentResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsDeleteMetastoreAssignmentResponse.java
new file mode 100755
index 000000000..bb80b9155
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsDeleteMetastoreAssignmentResponse.java
@@ -0,0 +1,29 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import java.util.Objects;
+
+/** The metastore assignment was successfully deleted. */
+@Generated
+public class AccountsDeleteMetastoreAssignmentResponse {
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash();
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(AccountsDeleteMetastoreAssignmentResponse.class).toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsDeleteMetastoreResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsDeleteMetastoreResponse.java
new file mode 100755
index 000000000..c764feedf
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsDeleteMetastoreResponse.java
@@ -0,0 +1,29 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import java.util.Objects;
+
+/** The metastore was successfully deleted. */
+@Generated
+public class AccountsDeleteMetastoreResponse {
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash();
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(AccountsDeleteMetastoreResponse.class).toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsDeleteStorageCredentialResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsDeleteStorageCredentialResponse.java
new file mode 100755
index 000000000..125aa3bdf
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsDeleteStorageCredentialResponse.java
@@ -0,0 +1,29 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import java.util.Objects;
+
+/** The storage credential was successfully deleted. */
+@Generated
+public class AccountsDeleteStorageCredentialResponse {
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash();
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(AccountsDeleteStorageCredentialResponse.class).toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsGetMetastoreResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsGetMetastoreResponse.java
new file mode 100755
index 000000000..2da0eb3f3
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsGetMetastoreResponse.java
@@ -0,0 +1,45 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+/** The metastore was successfully returned. */
+@Generated
+public class AccountsGetMetastoreResponse {
+ /** */
+ @JsonProperty("metastore_info")
+ private MetastoreInfo metastoreInfo;
+
+ public AccountsGetMetastoreResponse setMetastoreInfo(MetastoreInfo metastoreInfo) {
+ this.metastoreInfo = metastoreInfo;
+ return this;
+ }
+
+ public MetastoreInfo getMetastoreInfo() {
+ return metastoreInfo;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ AccountsGetMetastoreResponse that = (AccountsGetMetastoreResponse) o;
+ return Objects.equals(metastoreInfo, that.metastoreInfo);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(metastoreInfo);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(AccountsGetMetastoreResponse.class)
+ .add("metastoreInfo", metastoreInfo)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsListMetastoresResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsListMetastoresResponse.java
new file mode 100755
index 000000000..95620fe4d
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsListMetastoresResponse.java
@@ -0,0 +1,46 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Collection;
+import java.util.Objects;
+
+/** Metastores were returned successfully. */
+@Generated
+public class AccountsListMetastoresResponse {
+ /** An array of metastore information objects. */
+ @JsonProperty("metastores")
+ private Collection<MetastoreInfo> metastores;
+
+ public AccountsListMetastoresResponse setMetastores(Collection<MetastoreInfo> metastores) {
+ this.metastores = metastores;
+ return this;
+ }
+
+ public Collection<MetastoreInfo> getMetastores() {
+ return metastores;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ AccountsListMetastoresResponse that = (AccountsListMetastoresResponse) o;
+ return Objects.equals(metastores, that.metastores);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(metastores);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(AccountsListMetastoresResponse.class)
+ .add("metastores", metastores)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsMetastoreAssignment.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsMetastoreAssignment.java
index bf989d674..fbe83d2bd 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsMetastoreAssignment.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsMetastoreAssignment.java
@@ -7,6 +7,7 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
+/** The workspace metastore assignment was successfully returned. */
@Generated
public class AccountsMetastoreAssignment {
/** */
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsStorageCredentialInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsStorageCredentialInfo.java
index 696342a98..f3e5074e9 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsStorageCredentialInfo.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsStorageCredentialInfo.java
@@ -7,6 +7,7 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
+/** The storage credential was successfully retrieved. */
@Generated
public class AccountsStorageCredentialInfo {
/** */
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateMetastore.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateMetastore.java
index 044d8c6f2..74a421445 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateMetastore.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateMetastore.java
@@ -8,14 +8,15 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
+/** Properties of the metastore to change. */
@Generated
public class AccountsUpdateMetastore {
/** Unity Catalog metastore ID */
@JsonIgnore private String metastoreId;
- /** */
+ /** Properties of the metastore to change. */
@JsonProperty("metastore_info")
- private UpdateMetastore metastoreInfo;
+ private UpdateAccountsMetastore metastoreInfo;
public AccountsUpdateMetastore setMetastoreId(String metastoreId) {
this.metastoreId = metastoreId;
@@ -26,12 +27,12 @@ public String getMetastoreId() {
return metastoreId;
}
- public AccountsUpdateMetastore setMetastoreInfo(UpdateMetastore metastoreInfo) {
+ public AccountsUpdateMetastore setMetastoreInfo(UpdateAccountsMetastore metastoreInfo) {
this.metastoreInfo = metastoreInfo;
return this;
}
- public UpdateMetastore getMetastoreInfo() {
+ public UpdateAccountsMetastore getMetastoreInfo() {
return metastoreInfo;
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateMetastoreAssignment.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateMetastoreAssignment.java
index 3ce7c6f48..a50a5eab0 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateMetastoreAssignment.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateMetastoreAssignment.java
@@ -8,6 +8,7 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
+/** The metastore assignment to update. */
@Generated
public class AccountsUpdateMetastoreAssignment {
/** */
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateMetastoreAssignmentResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateMetastoreAssignmentResponse.java
new file mode 100755
index 000000000..648dbfa47
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateMetastoreAssignmentResponse.java
@@ -0,0 +1,29 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import java.util.Objects;
+
+/** The metastore assignment was successfully updated. */
+@Generated
+public class AccountsUpdateMetastoreAssignmentResponse {
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash();
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(AccountsUpdateMetastoreAssignmentResponse.class).toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateMetastoreResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateMetastoreResponse.java
new file mode 100755
index 000000000..4a4d5d560
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateMetastoreResponse.java
@@ -0,0 +1,45 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+/** The metastore update request succeeded. */
+@Generated
+public class AccountsUpdateMetastoreResponse {
+ /** */
+ @JsonProperty("metastore_info")
+ private MetastoreInfo metastoreInfo;
+
+ public AccountsUpdateMetastoreResponse setMetastoreInfo(MetastoreInfo metastoreInfo) {
+ this.metastoreInfo = metastoreInfo;
+ return this;
+ }
+
+ public MetastoreInfo getMetastoreInfo() {
+ return metastoreInfo;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ AccountsUpdateMetastoreResponse that = (AccountsUpdateMetastoreResponse) o;
+ return Objects.equals(metastoreInfo, that.metastoreInfo);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(metastoreInfo);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(AccountsUpdateMetastoreResponse.class)
+ .add("metastoreInfo", metastoreInfo)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateStorageCredential.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateStorageCredential.java
index bca8ab349..f654e4d3b 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateStorageCredential.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateStorageCredential.java
@@ -8,24 +8,32 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
+/** The storage credential to update. */
@Generated
public class AccountsUpdateStorageCredential {
/** */
@JsonProperty("credential_info")
- private UpdateStorageCredential credentialInfo;
+ private UpdateAccountsStorageCredential credentialInfo;
/** Unity Catalog metastore ID */
@JsonIgnore private String metastoreId;
+ /**
+ * Optional. Supplying true to this argument skips validation of the updated set of credentials.
+ */
+ @JsonProperty("skip_validation")
+ private Boolean skipValidation;
+
/** Name of the storage credential. */
@JsonIgnore private String storageCredentialName;
- public AccountsUpdateStorageCredential setCredentialInfo(UpdateStorageCredential credentialInfo) {
+ public AccountsUpdateStorageCredential setCredentialInfo(
+ UpdateAccountsStorageCredential credentialInfo) {
this.credentialInfo = credentialInfo;
return this;
}
- public UpdateStorageCredential getCredentialInfo() {
+ public UpdateAccountsStorageCredential getCredentialInfo() {
return credentialInfo;
}
@@ -38,6 +46,15 @@ public String getMetastoreId() {
return metastoreId;
}
+ public AccountsUpdateStorageCredential setSkipValidation(Boolean skipValidation) {
+ this.skipValidation = skipValidation;
+ return this;
+ }
+
+ public Boolean getSkipValidation() {
+ return skipValidation;
+ }
+
public AccountsUpdateStorageCredential setStorageCredentialName(String storageCredentialName) {
this.storageCredentialName = storageCredentialName;
return this;
@@ -54,12 +71,13 @@ public boolean equals(Object o) {
AccountsUpdateStorageCredential that = (AccountsUpdateStorageCredential) o;
return Objects.equals(credentialInfo, that.credentialInfo)
&& Objects.equals(metastoreId, that.metastoreId)
+ && Objects.equals(skipValidation, that.skipValidation)
&& Objects.equals(storageCredentialName, that.storageCredentialName);
}
@Override
public int hashCode() {
- return Objects.hash(credentialInfo, metastoreId, storageCredentialName);
+ return Objects.hash(credentialInfo, metastoreId, skipValidation, storageCredentialName);
}
@Override
@@ -67,6 +85,7 @@ public String toString() {
return new ToStringer(AccountsUpdateStorageCredential.class)
.add("credentialInfo", credentialInfo)
.add("metastoreId", metastoreId)
+ .add("skipValidation", skipValidation)
.add("storageCredentialName", storageCredentialName)
.toString();
}
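
The new `skip_validation` flag and the retyped `credential_info` compose like this; a minimal sketch using only the fluent setters shown in this diff (the metastore ID, credential name, and comment are placeholder values):

```java
import com.databricks.sdk.service.catalog.AccountsUpdateStorageCredential;
import com.databricks.sdk.service.catalog.UpdateAccountsStorageCredential;

public class BuildUpdateCredential {
  public static void main(String[] args) {
    AccountsUpdateStorageCredential req =
        new AccountsUpdateStorageCredential()
            .setMetastoreId("11111111-2222-3333-4444-555555555555") // placeholder
            .setStorageCredentialName("my-credential")
            .setSkipValidation(true) // skip validation of the updated credential set
            .setCredentialInfo(new UpdateAccountsStorageCredential().setComment("rotated keys"));
    System.out.println(req);
  }
}
```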
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateStorageCredentialResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateStorageCredentialResponse.java
new file mode 100755
index 000000000..0f1796167
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/AccountsUpdateStorageCredentialResponse.java
@@ -0,0 +1,46 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+/** The storage credential was successfully updated. */
+@Generated
+public class AccountsUpdateStorageCredentialResponse {
+ /** */
+ @JsonProperty("credential_info")
+ private StorageCredentialInfo credentialInfo;
+
+ public AccountsUpdateStorageCredentialResponse setCredentialInfo(
+ StorageCredentialInfo credentialInfo) {
+ this.credentialInfo = credentialInfo;
+ return this;
+ }
+
+ public StorageCredentialInfo getCredentialInfo() {
+ return credentialInfo;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ AccountsUpdateStorageCredentialResponse that = (AccountsUpdateStorageCredentialResponse) o;
+ return Objects.equals(credentialInfo, that.credentialInfo);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(credentialInfo);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(AccountsUpdateStorageCredentialResponse.class)
+ .add("credentialInfo", credentialInfo)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateAccountsMetastore.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateAccountsMetastore.java
new file mode 100755
index 000000000..2d82924c0
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateAccountsMetastore.java
@@ -0,0 +1,74 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class CreateAccountsMetastore {
+ /** The user-specified name of the metastore. */
+ @JsonProperty("name")
+ private String name;
+
+ /** Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). */
+ @JsonProperty("region")
+ private String region;
+
+ /** The storage root URL for metastore */
+ @JsonProperty("storage_root")
+ private String storageRoot;
+
+ public CreateAccountsMetastore setName(String name) {
+ this.name = name;
+ return this;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public CreateAccountsMetastore setRegion(String region) {
+ this.region = region;
+ return this;
+ }
+
+ public String getRegion() {
+ return region;
+ }
+
+ public CreateAccountsMetastore setStorageRoot(String storageRoot) {
+ this.storageRoot = storageRoot;
+ return this;
+ }
+
+ public String getStorageRoot() {
+ return storageRoot;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ CreateAccountsMetastore that = (CreateAccountsMetastore) o;
+ return Objects.equals(name, that.name)
+ && Objects.equals(region, that.region)
+ && Objects.equals(storageRoot, that.storageRoot);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(name, region, storageRoot);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(CreateAccountsMetastore.class)
+ .add("name", name)
+ .add("region", region)
+ .add("storageRoot", storageRoot)
+ .toString();
+ }
+}
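
A sketch of building the new request body; the name, region, and storage root are illustrative values in the formats the field docs describe:

```java
import com.databricks.sdk.service.catalog.CreateAccountsMetastore;

public class BuildMetastore {
  public static void main(String[] args) {
    CreateAccountsMetastore metastore =
        new CreateAccountsMetastore()
            .setName("primary-metastore")
            .setRegion("us-west-2")                       // cloud region the metastore serves
            .setStorageRoot("s3://my-bucket/metastore");  // storage root URL
    System.out.println(metastore);
  }
}
```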
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateAccountsStorageCredential.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateAccountsStorageCredential.java
new file mode 100755
index 000000000..8e636a900
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateAccountsStorageCredential.java
@@ -0,0 +1,167 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class CreateAccountsStorageCredential {
+ /** The AWS IAM role configuration. */
+ @JsonProperty("aws_iam_role")
+ private AwsIamRoleRequest awsIamRole;
+
+ /** The Azure managed identity configuration. */
+ @JsonProperty("azure_managed_identity")
+ private AzureManagedIdentityRequest azureManagedIdentity;
+
+ /** The Azure service principal configuration. */
+ @JsonProperty("azure_service_principal")
+ private AzureServicePrincipal azureServicePrincipal;
+
+ /** The Cloudflare API token configuration. */
+ @JsonProperty("cloudflare_api_token")
+ private CloudflareApiToken cloudflareApiToken;
+
+ /** Comment associated with the credential. */
+ @JsonProperty("comment")
+ private String comment;
+
+ /** The Databricks managed GCP service account configuration. */
+ @JsonProperty("databricks_gcp_service_account")
+ private DatabricksGcpServiceAccountRequest databricksGcpServiceAccount;
+
+ /**
+ * The credential name. The name must be unique among storage and service credentials within the
+ * metastore.
+ */
+ @JsonProperty("name")
+ private String name;
+
+ /**
+ * Whether the credential is usable only for read operations. Only applicable when purpose is
+ * **STORAGE**.
+ */
+ @JsonProperty("read_only")
+ private Boolean readOnly;
+
+ public CreateAccountsStorageCredential setAwsIamRole(AwsIamRoleRequest awsIamRole) {
+ this.awsIamRole = awsIamRole;
+ return this;
+ }
+
+ public AwsIamRoleRequest getAwsIamRole() {
+ return awsIamRole;
+ }
+
+ public CreateAccountsStorageCredential setAzureManagedIdentity(
+ AzureManagedIdentityRequest azureManagedIdentity) {
+ this.azureManagedIdentity = azureManagedIdentity;
+ return this;
+ }
+
+ public AzureManagedIdentityRequest getAzureManagedIdentity() {
+ return azureManagedIdentity;
+ }
+
+ public CreateAccountsStorageCredential setAzureServicePrincipal(
+ AzureServicePrincipal azureServicePrincipal) {
+ this.azureServicePrincipal = azureServicePrincipal;
+ return this;
+ }
+
+ public AzureServicePrincipal getAzureServicePrincipal() {
+ return azureServicePrincipal;
+ }
+
+ public CreateAccountsStorageCredential setCloudflareApiToken(
+ CloudflareApiToken cloudflareApiToken) {
+ this.cloudflareApiToken = cloudflareApiToken;
+ return this;
+ }
+
+ public CloudflareApiToken getCloudflareApiToken() {
+ return cloudflareApiToken;
+ }
+
+ public CreateAccountsStorageCredential setComment(String comment) {
+ this.comment = comment;
+ return this;
+ }
+
+ public String getComment() {
+ return comment;
+ }
+
+ public CreateAccountsStorageCredential setDatabricksGcpServiceAccount(
+ DatabricksGcpServiceAccountRequest databricksGcpServiceAccount) {
+ this.databricksGcpServiceAccount = databricksGcpServiceAccount;
+ return this;
+ }
+
+ public DatabricksGcpServiceAccountRequest getDatabricksGcpServiceAccount() {
+ return databricksGcpServiceAccount;
+ }
+
+ public CreateAccountsStorageCredential setName(String name) {
+ this.name = name;
+ return this;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public CreateAccountsStorageCredential setReadOnly(Boolean readOnly) {
+ this.readOnly = readOnly;
+ return this;
+ }
+
+ public Boolean getReadOnly() {
+ return readOnly;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ CreateAccountsStorageCredential that = (CreateAccountsStorageCredential) o;
+ return Objects.equals(awsIamRole, that.awsIamRole)
+ && Objects.equals(azureManagedIdentity, that.azureManagedIdentity)
+ && Objects.equals(azureServicePrincipal, that.azureServicePrincipal)
+ && Objects.equals(cloudflareApiToken, that.cloudflareApiToken)
+ && Objects.equals(comment, that.comment)
+ && Objects.equals(databricksGcpServiceAccount, that.databricksGcpServiceAccount)
+ && Objects.equals(name, that.name)
+ && Objects.equals(readOnly, that.readOnly);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(
+ awsIamRole,
+ azureManagedIdentity,
+ azureServicePrincipal,
+ cloudflareApiToken,
+ comment,
+ databricksGcpServiceAccount,
+ name,
+ readOnly);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(CreateAccountsStorageCredential.class)
+ .add("awsIamRole", awsIamRole)
+ .add("azureManagedIdentity", azureManagedIdentity)
+ .add("azureServicePrincipal", azureServicePrincipal)
+ .add("cloudflareApiToken", cloudflareApiToken)
+ .add("comment", comment)
+ .add("databricksGcpServiceAccount", databricksGcpServiceAccount)
+ .add("name", name)
+ .add("readOnly", readOnly)
+ .toString();
+ }
+}
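
A sketch of the AWS variant of this request, assuming `AwsIamRoleRequest` exposes a `setRoleArn` setter (the role ARN and names are placeholders). Exactly one cloud-specific block would be set per credential:

```java
import com.databricks.sdk.service.catalog.AwsIamRoleRequest;
import com.databricks.sdk.service.catalog.CreateAccountsStorageCredential;

public class BuildStorageCredential {
  public static void main(String[] args) {
    CreateAccountsStorageCredential cred =
        new CreateAccountsStorageCredential()
            .setName("prod-s3-credential") // must be unique among storage and service credentials
            .setComment("read-only access to the lakehouse bucket")
            .setReadOnly(true)             // only applicable to STORAGE-purpose credentials
            .setAwsIamRole(
                new AwsIamRoleRequest()
                    .setRoleArn("arn:aws:iam::123456789012:role/uc-access")); // placeholder ARN
    System.out.println(cred);
  }
}
```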
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunction.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunction.java
index 83052df1f..56a9b59e3 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunction.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunction.java
@@ -9,7 +9,7 @@
@Generated
public class CreateFunction {
- /** Name of parent catalog. */
+ /** Name of parent Catalog. */
@JsonProperty("catalog_name")
private String catalogName;
@@ -33,7 +33,7 @@ public class CreateFunction {
@JsonProperty("full_data_type")
private String fullDataType;
- /** */
+ /** Function input parameters. */
@JsonProperty("input_params")
private FunctionParameterInfos inputParams;
@@ -63,8 +63,8 @@ public class CreateFunction {
/**
* Function language. When **EXTERNAL** is used, the language of the routine function should be
- * specified in the __external_language__ field, and the __return_params__ of the function cannot
- * be used (as **TABLE** return type is not supported), and the __sql_data_access__ field must be
+ * specified in the **external_language** field, and the **return_params** of the function cannot
+ * be used (as **TABLE** return type is not supported), and the **sql_data_access** field must be
* **NO_SQL**.
*/
@JsonProperty("routine_body")
@@ -74,11 +74,11 @@ public class CreateFunction {
@JsonProperty("routine_definition")
private String routineDefinition;
- /** Function dependencies. */
+ /** function dependencies. */
@JsonProperty("routine_dependencies")
private DependencyList routineDependencies;
- /** Name of parent schema relative to its parent catalog. */
+ /** Name of parent Schema relative to its parent Catalog. */
@JsonProperty("schema_name")
private String schemaName;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionParameterStyle.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionParameterStyle.java
index 691b8e514..721bb01f9 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionParameterStyle.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionParameterStyle.java
@@ -4,7 +4,6 @@
import com.databricks.sdk.support.Generated;
-/** Function parameter style. **S** is the value for SQL. */
@Generated
public enum CreateFunctionParameterStyle {
S,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionRoutineBody.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionRoutineBody.java
index 6132a4c2a..f5b1b42e9 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionRoutineBody.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionRoutineBody.java
@@ -4,12 +4,6 @@
import com.databricks.sdk.support.Generated;
-/**
- * Function language. When **EXTERNAL** is used, the language of the routine function should be
- * specified in the __external_language__ field, and the __return_params__ of the function cannot be
- * used (as **TABLE** return type is not supported), and the __sql_data_access__ field must be
- * **NO_SQL**.
- */
@Generated
public enum CreateFunctionRoutineBody {
EXTERNAL,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionSecurityType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionSecurityType.java
index a0b13a4ee..480b1279a 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionSecurityType.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionSecurityType.java
@@ -4,7 +4,6 @@
import com.databricks.sdk.support.Generated;
-/** The security type of the function. */
@Generated
public enum CreateFunctionSecurityType {
DEFINER,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionSqlDataAccess.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionSqlDataAccess.java
index d8cb91987..28cb1b373 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionSqlDataAccess.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateFunctionSqlDataAccess.java
@@ -4,7 +4,6 @@
import com.databricks.sdk.support.Generated;
-/** Function SQL data access. */
@Generated
public enum CreateFunctionSqlDataAccess {
CONTAINS_SQL,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateRegisteredModelRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateRegisteredModelRequest.java
index 520b0f60a..71a3650f9 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateRegisteredModelRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateRegisteredModelRequest.java
@@ -5,10 +5,22 @@
import com.databricks.sdk.support.Generated;
import com.databricks.sdk.support.ToStringer;
import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Collection;
import java.util.Objects;
@Generated
public class CreateRegisteredModelRequest {
+ /** List of aliases associated with the registered model */
+ @JsonProperty("aliases")
+  private Collection<RegisteredModelAlias> aliases;
+
+ /**
+ * Indicates whether the principal is limited to retrieving metadata for the associated object
+ * through the BROWSE privilege when include_browse is enabled in the request.
+ */
+ @JsonProperty("browse_only")
+ private Boolean browseOnly;
+
/** The name of the catalog where the schema and the registered model reside */
@JsonProperty("catalog_name")
private String catalogName;
@@ -17,10 +29,30 @@ public class CreateRegisteredModelRequest {
@JsonProperty("comment")
private String comment;
+ /** Creation timestamp of the registered model in milliseconds since the Unix epoch */
+ @JsonProperty("created_at")
+ private Long createdAt;
+
+ /** The identifier of the user who created the registered model */
+ @JsonProperty("created_by")
+ private String createdBy;
+
+ /** The three-level (fully qualified) name of the registered model */
+ @JsonProperty("full_name")
+ private String fullName;
+
+ /** The unique identifier of the metastore */
+ @JsonProperty("metastore_id")
+ private String metastoreId;
+
/** The name of the registered model */
@JsonProperty("name")
private String name;
+ /** The identifier of the user who owns the registered model */
+ @JsonProperty("owner")
+ private String owner;
+
/** The name of the schema where the registered model resides */
@JsonProperty("schema_name")
private String schemaName;
@@ -29,6 +61,32 @@ public class CreateRegisteredModelRequest {
@JsonProperty("storage_location")
private String storageLocation;
+ /** Last-update timestamp of the registered model in milliseconds since the Unix epoch */
+ @JsonProperty("updated_at")
+ private Long updatedAt;
+
+ /** The identifier of the user who updated the registered model last time */
+ @JsonProperty("updated_by")
+ private String updatedBy;
+
+  public CreateRegisteredModelRequest setAliases(Collection<RegisteredModelAlias> aliases) {
+ this.aliases = aliases;
+ return this;
+ }
+
+  public Collection<RegisteredModelAlias> getAliases() {
+ return aliases;
+ }
+
+ public CreateRegisteredModelRequest setBrowseOnly(Boolean browseOnly) {
+ this.browseOnly = browseOnly;
+ return this;
+ }
+
+ public Boolean getBrowseOnly() {
+ return browseOnly;
+ }
+
public CreateRegisteredModelRequest setCatalogName(String catalogName) {
this.catalogName = catalogName;
return this;
@@ -47,6 +105,42 @@ public String getComment() {
return comment;
}
+ public CreateRegisteredModelRequest setCreatedAt(Long createdAt) {
+ this.createdAt = createdAt;
+ return this;
+ }
+
+ public Long getCreatedAt() {
+ return createdAt;
+ }
+
+ public CreateRegisteredModelRequest setCreatedBy(String createdBy) {
+ this.createdBy = createdBy;
+ return this;
+ }
+
+ public String getCreatedBy() {
+ return createdBy;
+ }
+
+ public CreateRegisteredModelRequest setFullName(String fullName) {
+ this.fullName = fullName;
+ return this;
+ }
+
+ public String getFullName() {
+ return fullName;
+ }
+
+ public CreateRegisteredModelRequest setMetastoreId(String metastoreId) {
+ this.metastoreId = metastoreId;
+ return this;
+ }
+
+ public String getMetastoreId() {
+ return metastoreId;
+ }
+
public CreateRegisteredModelRequest setName(String name) {
this.name = name;
return this;
@@ -56,6 +150,15 @@ public String getName() {
return name;
}
+ public CreateRegisteredModelRequest setOwner(String owner) {
+ this.owner = owner;
+ return this;
+ }
+
+ public String getOwner() {
+ return owner;
+ }
+
public CreateRegisteredModelRequest setSchemaName(String schemaName) {
this.schemaName = schemaName;
return this;
@@ -74,31 +177,81 @@ public String getStorageLocation() {
return storageLocation;
}
+ public CreateRegisteredModelRequest setUpdatedAt(Long updatedAt) {
+ this.updatedAt = updatedAt;
+ return this;
+ }
+
+ public Long getUpdatedAt() {
+ return updatedAt;
+ }
+
+ public CreateRegisteredModelRequest setUpdatedBy(String updatedBy) {
+ this.updatedBy = updatedBy;
+ return this;
+ }
+
+ public String getUpdatedBy() {
+ return updatedBy;
+ }
+
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
CreateRegisteredModelRequest that = (CreateRegisteredModelRequest) o;
- return Objects.equals(catalogName, that.catalogName)
+ return Objects.equals(aliases, that.aliases)
+ && Objects.equals(browseOnly, that.browseOnly)
+ && Objects.equals(catalogName, that.catalogName)
&& Objects.equals(comment, that.comment)
+ && Objects.equals(createdAt, that.createdAt)
+ && Objects.equals(createdBy, that.createdBy)
+ && Objects.equals(fullName, that.fullName)
+ && Objects.equals(metastoreId, that.metastoreId)
&& Objects.equals(name, that.name)
+ && Objects.equals(owner, that.owner)
&& Objects.equals(schemaName, that.schemaName)
- && Objects.equals(storageLocation, that.storageLocation);
+ && Objects.equals(storageLocation, that.storageLocation)
+ && Objects.equals(updatedAt, that.updatedAt)
+ && Objects.equals(updatedBy, that.updatedBy);
}
@Override
public int hashCode() {
- return Objects.hash(catalogName, comment, name, schemaName, storageLocation);
+ return Objects.hash(
+ aliases,
+ browseOnly,
+ catalogName,
+ comment,
+ createdAt,
+ createdBy,
+ fullName,
+ metastoreId,
+ name,
+ owner,
+ schemaName,
+ storageLocation,
+ updatedAt,
+ updatedBy);
}
@Override
public String toString() {
return new ToStringer(CreateRegisteredModelRequest.class)
+ .add("aliases", aliases)
+ .add("browseOnly", browseOnly)
.add("catalogName", catalogName)
.add("comment", comment)
+ .add("createdAt", createdAt)
+ .add("createdBy", createdBy)
+ .add("fullName", fullName)
+ .add("metastoreId", metastoreId)
.add("name", name)
+ .add("owner", owner)
.add("schemaName", schemaName)
.add("storageLocation", storageLocation)
+ .add("updatedAt", updatedAt)
+ .add("updatedBy", updatedBy)
.toString();
}
}
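
Most of the fields added here (`created_at`, `created_by`, `full_name`, `metastore_id`, `updated_at`, `updated_by`, `browse_only`) read as server-populated outputs rather than client inputs, so a create call would still only set the identity fields. A hedged sketch with placeholder names:

```java
import com.databricks.sdk.service.catalog.CreateRegisteredModelRequest;

public class BuildCreateModel {
  public static void main(String[] args) {
    // Timestamps, full_name, metastore_id, and browse_only are presumably
    // filled in by the server and left unset on the request.
    CreateRegisteredModelRequest req =
        new CreateRegisteredModelRequest()
            .setCatalogName("main")
            .setSchemaName("ml_models")
            .setName("churn_classifier")
            .setComment("weekly retrained churn model");
    System.out.println(req);
  }
}
```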
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateVolumeRequestContent.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateVolumeRequestContent.java
index 16f0ebbc6..bbe39faf4 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateVolumeRequestContent.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/CreateVolumeRequestContent.java
@@ -29,7 +29,13 @@ public class CreateVolumeRequestContent {
@JsonProperty("storage_location")
private String storageLocation;
- /** */
+ /**
+ * The type of the volume. An external volume is located in the specified external location. A
+ * managed volume is located in the default location which is specified by the parent schema, or
+ * the parent catalog, or the Metastore. [Learn more]
+ *
+ * [Learn more]: https://docs.databricks.com/aws/en/volumes/managed-vs-external
+ */
@JsonProperty("volume_type")
private VolumeType volumeType;
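
The new doc comment draws the EXTERNAL/MANAGED distinction; a sketch of the external case using setters this class is expected to expose (catalog, schema, and location values are placeholders):

```java
import com.databricks.sdk.service.catalog.CreateVolumeRequestContent;
import com.databricks.sdk.service.catalog.VolumeType;

public class BuildVolume {
  public static void main(String[] args) {
    // An EXTERNAL volume needs an explicit storage location; a MANAGED volume
    // would inherit its location from the parent schema, catalog, or metastore.
    CreateVolumeRequestContent volume =
        new CreateVolumeRequestContent()
            .setCatalogName("main")
            .setSchemaName("raw")
            .setName("landing_zone")
            .setVolumeType(VolumeType.EXTERNAL)
            .setStorageLocation("s3://my-bucket/landing"); // placeholder location
    System.out.println(volume);
  }
}
```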
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/DeleteFunctionRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/DeleteFunctionRequest.java
index 3609ad11f..35a82e3ec 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/DeleteFunctionRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/DeleteFunctionRequest.java
@@ -17,7 +17,7 @@ public class DeleteFunctionRequest {
/**
* The fully-qualified name of the function (of the form
- * __catalog_name__.__schema_name__.__function__name__).
+ * __catalog_name__.__schema_name__.__function__name__) .
*/
@JsonIgnore private String name;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfo.java
index 02b5d835d..69ca56bd1 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfo.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfo.java
@@ -16,7 +16,7 @@ public class FunctionInfo {
@JsonProperty("browse_only")
private Boolean browseOnly;
- /** Name of parent catalog. */
+ /** Name of parent Catalog. */
@JsonProperty("catalog_name")
private String catalogName;
@@ -48,7 +48,7 @@ public class FunctionInfo {
@JsonProperty("full_data_type")
private String fullDataType;
- /** Full name of function, in form of __catalog_name__.__schema_name__.__function__name__ */
+ /** Full name of Function, in form of **catalog_name**.**schema_name**.**function_name** */
@JsonProperty("full_name")
private String fullName;
@@ -56,7 +56,7 @@ public class FunctionInfo {
@JsonProperty("function_id")
private String functionId;
- /** */
+ /** Function input parameters. */
@JsonProperty("input_params")
private FunctionParameterInfos inputParams;
@@ -76,7 +76,7 @@ public class FunctionInfo {
@JsonProperty("name")
private String name;
- /** Username of current owner of function. */
+ /** Username of current owner of the function. */
@JsonProperty("owner")
private String owner;
@@ -94,8 +94,8 @@ public class FunctionInfo {
/**
* Function language. When **EXTERNAL** is used, the language of the routine function should be
- * specified in the __external_language__ field, and the __return_params__ of the function cannot
- * be used (as **TABLE** return type is not supported), and the __sql_data_access__ field must be
+ * specified in the **external_language** field, and the **return_params** of the function cannot
+ * be used (as **TABLE** return type is not supported), and the **sql_data_access** field must be
* **NO_SQL**.
*/
@JsonProperty("routine_body")
@@ -105,11 +105,11 @@ public class FunctionInfo {
@JsonProperty("routine_definition")
private String routineDefinition;
- /** Function dependencies. */
+ /** function dependencies. */
@JsonProperty("routine_dependencies")
private DependencyList routineDependencies;
- /** Name of parent schema relative to its parent catalog. */
+ /** Name of parent Schema relative to its parent Catalog. */
@JsonProperty("schema_name")
private String schemaName;
@@ -129,11 +129,11 @@ public class FunctionInfo {
@JsonProperty("sql_path")
private String sqlPath;
- /** Time at which this function was created, in epoch milliseconds. */
+ /** Time at which this function was last modified, in epoch milliseconds. */
@JsonProperty("updated_at")
private Long updatedAt;
- /** Username of user who last modified function. */
+ /** Username of user who last modified the function. */
@JsonProperty("updated_by")
private String updatedBy;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoParameterStyle.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoParameterStyle.java
index fab71fe1e..608574f72 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoParameterStyle.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoParameterStyle.java
@@ -4,7 +4,6 @@
import com.databricks.sdk.support.Generated;
-/** Function parameter style. **S** is the value for SQL. */
@Generated
public enum FunctionInfoParameterStyle {
S,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoRoutineBody.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoRoutineBody.java
index 24f8266e0..f69f1f670 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoRoutineBody.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoRoutineBody.java
@@ -4,12 +4,6 @@
import com.databricks.sdk.support.Generated;
-/**
- * Function language. When **EXTERNAL** is used, the language of the routine function should be
- * specified in the __external_language__ field, and the __return_params__ of the function cannot be
- * used (as **TABLE** return type is not supported), and the __sql_data_access__ field must be
- * **NO_SQL**.
- */
@Generated
public enum FunctionInfoRoutineBody {
EXTERNAL,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoSecurityType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoSecurityType.java
index 5b45675b4..ce6545a69 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoSecurityType.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoSecurityType.java
@@ -4,7 +4,6 @@
import com.databricks.sdk.support.Generated;
-/** The security type of the function. */
@Generated
public enum FunctionInfoSecurityType {
DEFINER,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoSqlDataAccess.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoSqlDataAccess.java
index 69b362394..fee8adcc8 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoSqlDataAccess.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionInfoSqlDataAccess.java
@@ -4,7 +4,6 @@
import com.databricks.sdk.support.Generated;
-/** Function SQL data access. */
@Generated
public enum FunctionInfoSqlDataAccess {
CONTAINS_SQL,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionParameterInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionParameterInfo.java
index 7e41e1dc0..ce5724ef9 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionParameterInfo.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionParameterInfo.java
@@ -13,7 +13,7 @@ public class FunctionParameterInfo {
@JsonProperty("comment")
private String comment;
- /** Name of parameter. */
+ /** Name of Parameter. */
@JsonProperty("name")
private String name;
@@ -21,11 +21,11 @@ public class FunctionParameterInfo {
@JsonProperty("parameter_default")
private String parameterDefault;
- /** */
+ /** Function parameter mode. */
@JsonProperty("parameter_mode")
private FunctionParameterMode parameterMode;
- /** */
+ /** Function parameter type. */
@JsonProperty("parameter_type")
private FunctionParameterType parameterType;
@@ -41,7 +41,7 @@ public class FunctionParameterInfo {
@JsonProperty("type_json")
private String typeJson;
- /** */
+ /** Name of type (INT, STRUCT, MAP, etc.) */
@JsonProperty("type_name")
private ColumnTypeName typeName;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionParameterInfos.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionParameterInfos.java
index 8242101b9..f2941005c 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionParameterInfos.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionParameterInfos.java
@@ -10,7 +10,7 @@
@Generated
public class FunctionParameterInfos {
- /** The array of __FunctionParameterInfo__ definitions of the function's parameters. */
+ /** */
@JsonProperty("parameters")
  private Collection<FunctionParameterInfo> parameters;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionParameterMode.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionParameterMode.java
index 731e91b2c..48a9a1870 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionParameterMode.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionParameterMode.java
@@ -4,7 +4,6 @@
import com.databricks.sdk.support.Generated;
-/** The mode of the function parameter. */
@Generated
public enum FunctionParameterMode {
IN,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionParameterType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionParameterType.java
index 7e930bcd6..046f5037f 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionParameterType.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionParameterType.java
@@ -4,7 +4,6 @@
import com.databricks.sdk.support.Generated;
-/** The type of function parameter. */
@Generated
public enum FunctionParameterType {
COLUMN,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionsImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionsImpl.java
index 387db0b64..2f7c48378 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionsImpl.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/FunctionsImpl.java
@@ -36,7 +36,6 @@ public void delete(DeleteFunctionRequest request) {
try {
Request req = new Request("DELETE", path);
ApiClient.setQuery(req, request);
- req.withHeader("Accept", "application/json");
apiClient.execute(req, Void.class);
} catch (IOException e) {
throw new DatabricksException("IO error: " + e.getMessage(), e);
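
The change drops the `Accept` header from this void-returning DELETE; callers are unaffected. For context, a hedged sketch of how the delete is typically invoked through the workspace client (assuming the usual `WorkspaceClient.functions()` accessor and environment-based auth):

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.catalog.DeleteFunctionRequest;

public class DeleteFunctionExample {
  public static void main(String[] args) {
    WorkspaceClient w = new WorkspaceClient(); // resolves credentials from the environment
    // Fully-qualified function name: catalog.schema.function
    w.functions().delete(new DeleteFunctionRequest().setName("main.analytics.my_udf"));
  }
}
```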
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/GetAccountStorageCredentialRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/GetAccountStorageCredentialRequest.java
index 29c20c2fe..54943217b 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/GetAccountStorageCredentialRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/GetAccountStorageCredentialRequest.java
@@ -12,7 +12,7 @@ public class GetAccountStorageCredentialRequest {
/** Unity Catalog metastore ID */
@JsonIgnore private String metastoreId;
- /** Name of the storage credential. */
+ /** Required. Name of the storage credential. */
@JsonIgnore private String storageCredentialName;
public GetAccountStorageCredentialRequest setMetastoreId(String metastoreId) {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListAccountMetastoreAssignmentsResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListAccountMetastoreAssignmentsResponse.java
index 5ca1d4263..be5de6d2e 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListAccountMetastoreAssignmentsResponse.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListAccountMetastoreAssignmentsResponse.java
@@ -8,7 +8,7 @@
import java.util.Collection;
import java.util.Objects;
-/** The list of workspaces to which the given metastore is assigned. */
+/** The metastore assignments were successfully returned. */
@Generated
public class ListAccountMetastoreAssignmentsResponse {
/** */
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListAccountStorageCredentialsResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListAccountStorageCredentialsResponse.java
index a5da186e4..fde3a512a 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListAccountStorageCredentialsResponse.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ListAccountStorageCredentialsResponse.java
@@ -8,6 +8,7 @@
import java.util.Collection;
import java.util.Objects;
+/** The metastore storage credentials were successfully returned. */
@Generated
public class ListAccountStorageCredentialsResponse {
/** An array of metastore storage credentials. */
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/MetastoreAssignment.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/MetastoreAssignment.java
index 0d9640d5f..3f78acc3b 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/MetastoreAssignment.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/MetastoreAssignment.java
@@ -9,7 +9,10 @@
@Generated
public class MetastoreAssignment {
- /** The name of the default catalog in the metastore. */
+ /**
+ * The name of the default catalog in the metastore. This field is deprecated. Please use "Default
+ * Namespace API" to configure the default catalog for a Databricks workspace.
+ */
@JsonProperty("default_catalog_name")
private String defaultCatalogName;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ModelVersionInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ModelVersionInfo.java
index 8dbd67ae1..dd2cc00b9 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ModelVersionInfo.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ModelVersionInfo.java
@@ -14,13 +14,6 @@ public class ModelVersionInfo {
@JsonProperty("aliases")
  private Collection<RegisteredModelAlias> aliases;
- /**
- * Indicates whether the principal is limited to retrieving metadata for the associated object
- * through the BROWSE privilege when include_browse is enabled in the request.
- */
- @JsonProperty("browse_only")
- private Boolean browseOnly;
-
/** The name of the catalog containing the model version */
@JsonProperty("catalog_name")
private String catalogName;
@@ -109,15 +102,6 @@ public Collection<RegisteredModelAlias> getAliases() {
return aliases;
}
- public ModelVersionInfo setBrowseOnly(Boolean browseOnly) {
- this.browseOnly = browseOnly;
- return this;
- }
-
- public Boolean getBrowseOnly() {
- return browseOnly;
- }
-
public ModelVersionInfo setCatalogName(String catalogName) {
this.catalogName = catalogName;
return this;
@@ -277,7 +261,6 @@ public boolean equals(Object o) {
if (o == null || getClass() != o.getClass()) return false;
ModelVersionInfo that = (ModelVersionInfo) o;
return Objects.equals(aliases, that.aliases)
- && Objects.equals(browseOnly, that.browseOnly)
&& Objects.equals(catalogName, that.catalogName)
&& Objects.equals(comment, that.comment)
&& Objects.equals(createdAt, that.createdAt)
@@ -301,7 +284,6 @@ public boolean equals(Object o) {
public int hashCode() {
return Objects.hash(
aliases,
- browseOnly,
catalogName,
comment,
createdAt,
@@ -325,7 +307,6 @@ public int hashCode() {
public String toString() {
return new ToStringer(ModelVersionInfo.class)
.add("aliases", aliases)
- .add("browseOnly", browseOnly)
.add("catalogName", catalogName)
.add("comment", comment)
.add("createdAt", createdAt)
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ModelVersionInfoStatus.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ModelVersionInfoStatus.java
index ffbbbc6a5..5ff0b7b8a 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ModelVersionInfoStatus.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/ModelVersionInfoStatus.java
@@ -4,14 +4,10 @@
import com.databricks.sdk.support.Generated;
-/**
- * Current status of the model version. Newly created model versions start in PENDING_REGISTRATION
- * status, then move to READY status once the model version files are uploaded and the model version
- * is finalized. Only model versions in READY status can be loaded for inference or served.
- */
@Generated
public enum ModelVersionInfoStatus {
FAILED_REGISTRATION,
+ MODEL_VERSION_STATUS_UNKNOWN,
PENDING_REGISTRATION,
READY,
}
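
With the new `MODEL_VERSION_STATUS_UNKNOWN` constant, exhaustive switches over this enum gain a case. A small defensive sketch; per the doc text removed above, only READY versions can be loaded for inference:

```java
import com.databricks.sdk.service.catalog.ModelVersionInfoStatus;

public class StatusHandling {
  // Illustrative helper: only READY versions are servable.
  static boolean isServable(ModelVersionInfoStatus status) {
    switch (status) {
      case READY:
        return true;
      case PENDING_REGISTRATION:
      case FAILED_REGISTRATION:
      case MODEL_VERSION_STATUS_UNKNOWN: // new in this change; treat as not servable
      default:
        return false;
    }
  }
}
```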
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/Privilege.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/Privilege.java
index 9d4ea5c05..5b76c8967 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/Privilege.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/Privilege.java
@@ -34,6 +34,7 @@ public enum Privilege {
CREATE_VOLUME,
EXECUTE,
EXECUTE_CLEAN_ROOM_TASK,
+ EXTERNAL_USE_SCHEMA,
MANAGE,
MANAGE_ALLOWLIST,
MODIFY,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/RegisteredModelAlias.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/RegisteredModelAlias.java
index 3bfde7bc5..043755b1d 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/RegisteredModelAlias.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/RegisteredModelAlias.java
@@ -7,13 +7,28 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
-/** Registered model alias. */
@Generated
public class RegisteredModelAlias {
/** Name of the alias, e.g. 'champion' or 'latest_stable' */
@JsonProperty("alias_name")
private String aliasName;
+ /** The name of the catalog containing the model version */
+ @JsonProperty("catalog_name")
+ private String catalogName;
+
+ /** The unique identifier of the alias */
+ @JsonProperty("id")
+ private String id;
+
+ /** The name of the parent registered model of the model version, relative to parent schema */
+ @JsonProperty("model_name")
+ private String modelName;
+
+ /** The name of the schema containing the model version, relative to parent catalog */
+ @JsonProperty("schema_name")
+ private String schemaName;
+
/** Integer version number of the model version to which this alias points. */
@JsonProperty("version_num")
private Long versionNum;
@@ -27,6 +42,42 @@ public String getAliasName() {
return aliasName;
}
+ public RegisteredModelAlias setCatalogName(String catalogName) {
+ this.catalogName = catalogName;
+ return this;
+ }
+
+ public String getCatalogName() {
+ return catalogName;
+ }
+
+ public RegisteredModelAlias setId(String id) {
+ this.id = id;
+ return this;
+ }
+
+ public String getId() {
+ return id;
+ }
+
+ public RegisteredModelAlias setModelName(String modelName) {
+ this.modelName = modelName;
+ return this;
+ }
+
+ public String getModelName() {
+ return modelName;
+ }
+
+ public RegisteredModelAlias setSchemaName(String schemaName) {
+ this.schemaName = schemaName;
+ return this;
+ }
+
+ public String getSchemaName() {
+ return schemaName;
+ }
+
public RegisteredModelAlias setVersionNum(Long versionNum) {
this.versionNum = versionNum;
return this;
@@ -41,18 +92,27 @@ public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
RegisteredModelAlias that = (RegisteredModelAlias) o;
- return Objects.equals(aliasName, that.aliasName) && Objects.equals(versionNum, that.versionNum);
+ return Objects.equals(aliasName, that.aliasName)
+ && Objects.equals(catalogName, that.catalogName)
+ && Objects.equals(id, that.id)
+ && Objects.equals(modelName, that.modelName)
+ && Objects.equals(schemaName, that.schemaName)
+ && Objects.equals(versionNum, that.versionNum);
}
@Override
public int hashCode() {
- return Objects.hash(aliasName, versionNum);
+ return Objects.hash(aliasName, catalogName, id, modelName, schemaName, versionNum);
}
@Override
public String toString() {
return new ToStringer(RegisteredModelAlias.class)
.add("aliasName", aliasName)
+ .add("catalogName", catalogName)
+ .add("id", id)
+ .add("modelName", modelName)
+ .add("schemaName", schemaName)
.add("versionNum", versionNum)
.toString();
}
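
A sketch of the expanded alias shape, built with the setters added in this diff (all names and the version number are placeholders; `id` is omitted since it reads as server-assigned):

```java
import com.databricks.sdk.service.catalog.RegisteredModelAlias;

public class BuildAlias {
  public static void main(String[] args) {
    RegisteredModelAlias alias =
        new RegisteredModelAlias()
            .setAliasName("champion")
            .setCatalogName("main")
            .setSchemaName("ml_models")
            .setModelName("churn_classifier")
            .setVersionNum(7L);
    System.out.println(alias);
  }
}
```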
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/RegisteredModelsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/RegisteredModelsAPI.java
index 608438656..e5c508180 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/RegisteredModelsAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/RegisteredModelsAPI.java
@@ -30,8 +30,8 @@
* metadata (comments, aliases) create a new model version, or update permissions on the registered
* model, users must be owners of the registered model.
*
- * Note: The securable type for models is "FUNCTION". When using REST APIs (e.g. tagging, grants)
- * that specify a securable type, use "FUNCTION" as the securable type.
+ * <p>Note: The securable type for models is FUNCTION. When using REST APIs (e.g. tagging, grants)
+ * that specify a securable type, use FUNCTION as the securable type.
*/
@Generated
public class RegisteredModelsAPI {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/RegisteredModelsService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/RegisteredModelsService.java
index a03772d04..ccc99737b 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/RegisteredModelsService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/RegisteredModelsService.java
@@ -26,8 +26,8 @@
* metadata (comments, aliases) create a new model version, or update permissions on the registered
* model, users must be owners of the registered model.
*
- * <p>Note: The securable type for models is "FUNCTION". When using REST APIs (e.g. tagging, grants)
- * that specify a securable type, use "FUNCTION" as the securable type.
+ * <p>Note: The securable type for models is FUNCTION. When using REST APIs (e.g. tagging, grants)
+ * that specify a securable type, use FUNCTION as the securable type.
*
 * <p>This is the high-level interface, that contains generated methods.
*
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SecurableKind.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SecurableKind.java
index fd09c0225..a02ad9204 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SecurableKind.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SecurableKind.java
@@ -4,7 +4,7 @@
import com.databricks.sdk.support.Generated;
-/** Latest kind: CONNECTION_PALANTIR_OAUTH_M2M = 263; Next id:264 */
+/** Latest kind: CONNECTION_REDSHIFT_IAM = 265; Next id:266 */
@Generated
public enum SecurableKind {
TABLE_DB_STORAGE,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SetRegisteredModelAliasRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SetRegisteredModelAliasRequest.java
index 3c6c39fde..a024d5ded 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SetRegisteredModelAliasRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SetRegisteredModelAliasRequest.java
@@ -4,18 +4,17 @@
import com.databricks.sdk.support.Generated;
import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
@Generated
public class SetRegisteredModelAliasRequest {
/** The name of the alias */
- @JsonProperty("alias")
- private String alias;
+ @JsonIgnore private String alias;
- /** Full name of the registered model */
- @JsonProperty("full_name")
- private String fullName;
+ /** The three-level (fully qualified) name of the registered model */
+ @JsonIgnore private String fullName;
/** The version number of the model version to which the alias points */
@JsonProperty("version_num")
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SystemType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SystemType.java
index 324e1b850..527a800b8 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SystemType.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/SystemType.java
@@ -25,6 +25,7 @@ public enum SystemType {
SAP,
SERVICENOW,
SNOWFLAKE,
+ STREAM_NATIVE,
TABLEAU,
TERADATA,
WORKDAY,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateAccountsMetastore.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateAccountsMetastore.java
new file mode 100755
index 000000000..215f0eacf
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateAccountsMetastore.java
@@ -0,0 +1,134 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class UpdateAccountsMetastore {
+ /**
+ * The organization name of a Delta Sharing entity, to be used in Databricks-to-Databricks Delta
+ * Sharing as the official name.
+ */
+ @JsonProperty("delta_sharing_organization_name")
+ private String deltaSharingOrganizationName;
+
+ /** The lifetime of delta sharing recipient token in seconds. */
+ @JsonProperty("delta_sharing_recipient_token_lifetime_in_seconds")
+ private Long deltaSharingRecipientTokenLifetimeInSeconds;
+
+ /** The scope of Delta Sharing enabled for the metastore. */
+ @JsonProperty("delta_sharing_scope")
+ private DeltaSharingScopeEnum deltaSharingScope;
+
+ /** The owner of the metastore. */
+ @JsonProperty("owner")
+ private String owner;
+
+ /** Privilege model version of the metastore, of the form `major.minor` (e.g., `1.0`). */
+ @JsonProperty("privilege_model_version")
+ private String privilegeModelVersion;
+
+ /** UUID of storage credential to access the metastore storage_root. */
+ @JsonProperty("storage_root_credential_id")
+ private String storageRootCredentialId;
+
+ public UpdateAccountsMetastore setDeltaSharingOrganizationName(
+ String deltaSharingOrganizationName) {
+ this.deltaSharingOrganizationName = deltaSharingOrganizationName;
+ return this;
+ }
+
+ public String getDeltaSharingOrganizationName() {
+ return deltaSharingOrganizationName;
+ }
+
+ public UpdateAccountsMetastore setDeltaSharingRecipientTokenLifetimeInSeconds(
+ Long deltaSharingRecipientTokenLifetimeInSeconds) {
+ this.deltaSharingRecipientTokenLifetimeInSeconds = deltaSharingRecipientTokenLifetimeInSeconds;
+ return this;
+ }
+
+ public Long getDeltaSharingRecipientTokenLifetimeInSeconds() {
+ return deltaSharingRecipientTokenLifetimeInSeconds;
+ }
+
+ public UpdateAccountsMetastore setDeltaSharingScope(DeltaSharingScopeEnum deltaSharingScope) {
+ this.deltaSharingScope = deltaSharingScope;
+ return this;
+ }
+
+ public DeltaSharingScopeEnum getDeltaSharingScope() {
+ return deltaSharingScope;
+ }
+
+ public UpdateAccountsMetastore setOwner(String owner) {
+ this.owner = owner;
+ return this;
+ }
+
+ public String getOwner() {
+ return owner;
+ }
+
+ public UpdateAccountsMetastore setPrivilegeModelVersion(String privilegeModelVersion) {
+ this.privilegeModelVersion = privilegeModelVersion;
+ return this;
+ }
+
+ public String getPrivilegeModelVersion() {
+ return privilegeModelVersion;
+ }
+
+ public UpdateAccountsMetastore setStorageRootCredentialId(String storageRootCredentialId) {
+ this.storageRootCredentialId = storageRootCredentialId;
+ return this;
+ }
+
+ public String getStorageRootCredentialId() {
+ return storageRootCredentialId;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ UpdateAccountsMetastore that = (UpdateAccountsMetastore) o;
+ return Objects.equals(deltaSharingOrganizationName, that.deltaSharingOrganizationName)
+ && Objects.equals(
+ deltaSharingRecipientTokenLifetimeInSeconds,
+ that.deltaSharingRecipientTokenLifetimeInSeconds)
+ && Objects.equals(deltaSharingScope, that.deltaSharingScope)
+ && Objects.equals(owner, that.owner)
+ && Objects.equals(privilegeModelVersion, that.privilegeModelVersion)
+ && Objects.equals(storageRootCredentialId, that.storageRootCredentialId);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(
+ deltaSharingOrganizationName,
+ deltaSharingRecipientTokenLifetimeInSeconds,
+ deltaSharingScope,
+ owner,
+ privilegeModelVersion,
+ storageRootCredentialId);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(UpdateAccountsMetastore.class)
+ .add("deltaSharingOrganizationName", deltaSharingOrganizationName)
+ .add(
+ "deltaSharingRecipientTokenLifetimeInSeconds",
+ deltaSharingRecipientTokenLifetimeInSeconds)
+ .add("deltaSharingScope", deltaSharingScope)
+ .add("owner", owner)
+ .add("privilegeModelVersion", privilegeModelVersion)
+ .add("storageRootCredentialId", storageRootCredentialId)
+ .toString();
+ }
+}
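
A sketch of a partial metastore update using this new request body; the owner principal and lifetime are placeholder values, and unset fields would presumably be left unchanged by the service:

```java
import com.databricks.sdk.service.catalog.UpdateAccountsMetastore;

public class BuildMetastoreUpdate {
  public static void main(String[] args) {
    UpdateAccountsMetastore update =
        new UpdateAccountsMetastore()
            .setOwner("data-platform-admins")                       // placeholder principal
            .setDeltaSharingRecipientTokenLifetimeInSeconds(86400L) // one day, in seconds
            .setPrivilegeModelVersion("1.0");                       // major.minor form
    System.out.println(update);
  }
}
```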
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateAccountsStorageCredential.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateAccountsStorageCredential.java
new file mode 100755
index 000000000..22801de40
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateAccountsStorageCredential.java
@@ -0,0 +1,183 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.catalog;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class UpdateAccountsStorageCredential {
+ /** The AWS IAM role configuration. */
+ @JsonProperty("aws_iam_role")
+ private AwsIamRoleRequest awsIamRole;
+
+ /** The Azure managed identity configuration. */
+ @JsonProperty("azure_managed_identity")
+ private AzureManagedIdentityResponse azureManagedIdentity;
+
+ /** The Azure service principal configuration. */
+ @JsonProperty("azure_service_principal")
+ private AzureServicePrincipal azureServicePrincipal;
+
+ /** The Cloudflare API token configuration. */
+ @JsonProperty("cloudflare_api_token")
+ private CloudflareApiToken cloudflareApiToken;
+
+ /** Comment associated with the credential. */
+ @JsonProperty("comment")
+ private String comment;
+
+ /** The Databricks managed GCP service account configuration. */
+ @JsonProperty("databricks_gcp_service_account")
+ private DatabricksGcpServiceAccountRequest databricksGcpServiceAccount;
+
+ /**
+ * Whether the current securable is accessible from all workspaces or a specific set of
+ * workspaces.
+ */
+ @JsonProperty("isolation_mode")
+ private IsolationMode isolationMode;
+
+ /** Username of current owner of credential. */
+ @JsonProperty("owner")
+ private String owner;
+
+ /**
+ * Whether the credential is usable only for read operations. Only applicable when purpose is
+ * **STORAGE**.
+ */
+ @JsonProperty("read_only")
+ private Boolean readOnly;
+
+ public UpdateAccountsStorageCredential setAwsIamRole(AwsIamRoleRequest awsIamRole) {
+ this.awsIamRole = awsIamRole;
+ return this;
+ }
+
+ public AwsIamRoleRequest getAwsIamRole() {
+ return awsIamRole;
+ }
+
+ public UpdateAccountsStorageCredential setAzureManagedIdentity(
+ AzureManagedIdentityResponse azureManagedIdentity) {
+ this.azureManagedIdentity = azureManagedIdentity;
+ return this;
+ }
+
+ public AzureManagedIdentityResponse getAzureManagedIdentity() {
+ return azureManagedIdentity;
+ }
+
+ public UpdateAccountsStorageCredential setAzureServicePrincipal(
+ AzureServicePrincipal azureServicePrincipal) {
+ this.azureServicePrincipal = azureServicePrincipal;
+ return this;
+ }
+
+ public AzureServicePrincipal getAzureServicePrincipal() {
+ return azureServicePrincipal;
+ }
+
+ public UpdateAccountsStorageCredential setCloudflareApiToken(
+ CloudflareApiToken cloudflareApiToken) {
+ this.cloudflareApiToken = cloudflareApiToken;
+ return this;
+ }
+
+ public CloudflareApiToken getCloudflareApiToken() {
+ return cloudflareApiToken;
+ }
+
+ public UpdateAccountsStorageCredential setComment(String comment) {
+ this.comment = comment;
+ return this;
+ }
+
+ public String getComment() {
+ return comment;
+ }
+
+ public UpdateAccountsStorageCredential setDatabricksGcpServiceAccount(
+ DatabricksGcpServiceAccountRequest databricksGcpServiceAccount) {
+ this.databricksGcpServiceAccount = databricksGcpServiceAccount;
+ return this;
+ }
+
+ public DatabricksGcpServiceAccountRequest getDatabricksGcpServiceAccount() {
+ return databricksGcpServiceAccount;
+ }
+
+ public UpdateAccountsStorageCredential setIsolationMode(IsolationMode isolationMode) {
+ this.isolationMode = isolationMode;
+ return this;
+ }
+
+ public IsolationMode getIsolationMode() {
+ return isolationMode;
+ }
+
+ public UpdateAccountsStorageCredential setOwner(String owner) {
+ this.owner = owner;
+ return this;
+ }
+
+ public String getOwner() {
+ return owner;
+ }
+
+ public UpdateAccountsStorageCredential setReadOnly(Boolean readOnly) {
+ this.readOnly = readOnly;
+ return this;
+ }
+
+ public Boolean getReadOnly() {
+ return readOnly;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ UpdateAccountsStorageCredential that = (UpdateAccountsStorageCredential) o;
+ return Objects.equals(awsIamRole, that.awsIamRole)
+ && Objects.equals(azureManagedIdentity, that.azureManagedIdentity)
+ && Objects.equals(azureServicePrincipal, that.azureServicePrincipal)
+ && Objects.equals(cloudflareApiToken, that.cloudflareApiToken)
+ && Objects.equals(comment, that.comment)
+ && Objects.equals(databricksGcpServiceAccount, that.databricksGcpServiceAccount)
+ && Objects.equals(isolationMode, that.isolationMode)
+ && Objects.equals(owner, that.owner)
+ && Objects.equals(readOnly, that.readOnly);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(
+ awsIamRole,
+ azureManagedIdentity,
+ azureServicePrincipal,
+ cloudflareApiToken,
+ comment,
+ databricksGcpServiceAccount,
+ isolationMode,
+ owner,
+ readOnly);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(UpdateAccountsStorageCredential.class)
+ .add("awsIamRole", awsIamRole)
+ .add("azureManagedIdentity", azureManagedIdentity)
+ .add("azureServicePrincipal", azureServicePrincipal)
+ .add("cloudflareApiToken", cloudflareApiToken)
+ .add("comment", comment)
+ .add("databricksGcpServiceAccount", databricksGcpServiceAccount)
+ .add("isolationMode", isolationMode)
+ .add("owner", owner)
+ .add("readOnly", readOnly)
+ .toString();
+ }
+}
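
A minimal usage sketch for this new accounts-level type, using the generated fluent setters; the role ARN and comment below are placeholders, and it assumes `AwsIamRoleRequest` exposes the usual generated `setRoleArn` setter:

```java
UpdateAccountsStorageCredential update =
    new UpdateAccountsStorageCredential()
        .setAwsIamRole(
            new AwsIamRoleRequest().setRoleArn("arn:aws:iam::123456789012:role/example"))
        .setComment("rotated credential") // placeholder comment
        .setReadOnly(true);
```
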
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateFunction.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateFunction.java
index a785536bf..bdbf23c12 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateFunction.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateFunction.java
@@ -16,7 +16,7 @@ public class UpdateFunction {
*/
@JsonIgnore private String name;
- /** Username of current owner of function. */
+ /** Username of current owner of the function. */
@JsonProperty("owner")
private String owner;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateModelVersionRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateModelVersionRequest.java
index 4e220749c..b49178472 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateModelVersionRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateModelVersionRequest.java
@@ -6,20 +6,114 @@
import com.databricks.sdk.support.ToStringer;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Collection;
import java.util.Objects;
@Generated
public class UpdateModelVersionRequest {
+ /** List of aliases associated with the model version */
+ @JsonProperty("aliases")
+ private Collection<RegisteredModelAlias> aliases;
+
+ /** The name of the catalog containing the model version */
+ @JsonProperty("catalog_name")
+ private String catalogName;
+
/** The comment attached to the model version */
@JsonProperty("comment")
private String comment;
+ /** */
+ @JsonProperty("created_at")
+ private Long createdAt;
+
+ /** The identifier of the user who created the model version */
+ @JsonProperty("created_by")
+ private String createdBy;
+
/** The three-level (fully qualified) name of the model version */
@JsonIgnore private String fullName;
+ /** The unique identifier of the model version */
+ @JsonProperty("id")
+ private String id;
+
+ /** The unique identifier of the metastore containing the model version */
+ @JsonProperty("metastore_id")
+ private String metastoreId;
+
+ /** The name of the parent registered model of the model version, relative to parent schema */
+ @JsonProperty("model_name")
+ private String modelName;
+
+ /** Model version dependencies, for feature-store packaged models */
+ @JsonProperty("model_version_dependencies")
+ private DependencyList modelVersionDependencies;
+
+ /**
+ * MLflow run ID used when creating the model version, if ``source`` was generated by an
+ * experiment run stored in an MLflow tracking server
+ */
+ @JsonProperty("run_id")
+ private String runId;
+
+ /**
+ * ID of the Databricks workspace containing the MLflow run that generated this model version, if
+ * applicable
+ */
+ @JsonProperty("run_workspace_id")
+ private Long runWorkspaceId;
+
+ /** The name of the schema containing the model version, relative to parent catalog */
+ @JsonProperty("schema_name")
+ private String schemaName;
+
+ /** URI indicating the location of the source artifacts (files) for the model version */
+ @JsonProperty("source")
+ private String source;
+
+ /**
+ * Current status of the model version. Newly created model versions start in PENDING_REGISTRATION
+ * status, then move to READY status once the model version files are uploaded and the model
+ * version is finalized. Only model versions in READY status can be loaded for inference or
+ * served.
+ */
+ @JsonProperty("status")
+ private ModelVersionInfoStatus status;
+
+ /** The storage location on the cloud under which model version data files are stored */
+ @JsonProperty("storage_location")
+ private String storageLocation;
+
+ /** */
+ @JsonProperty("updated_at")
+ private Long updatedAt;
+
+ /** The identifier of the user who updated the model version last time */
+ @JsonProperty("updated_by")
+ private String updatedBy;
+
/** The integer version number of the model version */
@JsonIgnore private Long version;
+ public UpdateModelVersionRequest setAliases(Collection<RegisteredModelAlias> aliases) {
+ this.aliases = aliases;
+ return this;
+ }
+
+ public Collection<RegisteredModelAlias> getAliases() {
+ return aliases;
+ }
+
+ public UpdateModelVersionRequest setCatalogName(String catalogName) {
+ this.catalogName = catalogName;
+ return this;
+ }
+
+ public String getCatalogName() {
+ return catalogName;
+ }
+
public UpdateModelVersionRequest setComment(String comment) {
this.comment = comment;
return this;
@@ -29,6 +123,24 @@ public String getComment() {
return comment;
}
+ public UpdateModelVersionRequest setCreatedAt(Long createdAt) {
+ this.createdAt = createdAt;
+ return this;
+ }
+
+ public Long getCreatedAt() {
+ return createdAt;
+ }
+
+ public UpdateModelVersionRequest setCreatedBy(String createdBy) {
+ this.createdBy = createdBy;
+ return this;
+ }
+
+ public String getCreatedBy() {
+ return createdBy;
+ }
+
public UpdateModelVersionRequest setFullName(String fullName) {
this.fullName = fullName;
return this;
@@ -38,6 +150,115 @@ public String getFullName() {
return fullName;
}
+ public UpdateModelVersionRequest setId(String id) {
+ this.id = id;
+ return this;
+ }
+
+ public String getId() {
+ return id;
+ }
+
+ public UpdateModelVersionRequest setMetastoreId(String metastoreId) {
+ this.metastoreId = metastoreId;
+ return this;
+ }
+
+ public String getMetastoreId() {
+ return metastoreId;
+ }
+
+ public UpdateModelVersionRequest setModelName(String modelName) {
+ this.modelName = modelName;
+ return this;
+ }
+
+ public String getModelName() {
+ return modelName;
+ }
+
+ public UpdateModelVersionRequest setModelVersionDependencies(
+ DependencyList modelVersionDependencies) {
+ this.modelVersionDependencies = modelVersionDependencies;
+ return this;
+ }
+
+ public DependencyList getModelVersionDependencies() {
+ return modelVersionDependencies;
+ }
+
+ public UpdateModelVersionRequest setRunId(String runId) {
+ this.runId = runId;
+ return this;
+ }
+
+ public String getRunId() {
+ return runId;
+ }
+
+ public UpdateModelVersionRequest setRunWorkspaceId(Long runWorkspaceId) {
+ this.runWorkspaceId = runWorkspaceId;
+ return this;
+ }
+
+ public Long getRunWorkspaceId() {
+ return runWorkspaceId;
+ }
+
+ public UpdateModelVersionRequest setSchemaName(String schemaName) {
+ this.schemaName = schemaName;
+ return this;
+ }
+
+ public String getSchemaName() {
+ return schemaName;
+ }
+
+ public UpdateModelVersionRequest setSource(String source) {
+ this.source = source;
+ return this;
+ }
+
+ public String getSource() {
+ return source;
+ }
+
+ public UpdateModelVersionRequest setStatus(ModelVersionInfoStatus status) {
+ this.status = status;
+ return this;
+ }
+
+ public ModelVersionInfoStatus getStatus() {
+ return status;
+ }
+
+ public UpdateModelVersionRequest setStorageLocation(String storageLocation) {
+ this.storageLocation = storageLocation;
+ return this;
+ }
+
+ public String getStorageLocation() {
+ return storageLocation;
+ }
+
+ public UpdateModelVersionRequest setUpdatedAt(Long updatedAt) {
+ this.updatedAt = updatedAt;
+ return this;
+ }
+
+ public Long getUpdatedAt() {
+ return updatedAt;
+ }
+
+ public UpdateModelVersionRequest setUpdatedBy(String updatedBy) {
+ this.updatedBy = updatedBy;
+ return this;
+ }
+
+ public String getUpdatedBy() {
+ return updatedBy;
+ }
+
public UpdateModelVersionRequest setVersion(Long version) {
this.version = version;
return this;
@@ -52,21 +273,72 @@ public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
UpdateModelVersionRequest that = (UpdateModelVersionRequest) o;
- return Objects.equals(comment, that.comment)
+ return Objects.equals(aliases, that.aliases)
+ && Objects.equals(catalogName, that.catalogName)
+ && Objects.equals(comment, that.comment)
+ && Objects.equals(createdAt, that.createdAt)
+ && Objects.equals(createdBy, that.createdBy)
&& Objects.equals(fullName, that.fullName)
+ && Objects.equals(id, that.id)
+ && Objects.equals(metastoreId, that.metastoreId)
+ && Objects.equals(modelName, that.modelName)
+ && Objects.equals(modelVersionDependencies, that.modelVersionDependencies)
+ && Objects.equals(runId, that.runId)
+ && Objects.equals(runWorkspaceId, that.runWorkspaceId)
+ && Objects.equals(schemaName, that.schemaName)
+ && Objects.equals(source, that.source)
+ && Objects.equals(status, that.status)
+ && Objects.equals(storageLocation, that.storageLocation)
+ && Objects.equals(updatedAt, that.updatedAt)
+ && Objects.equals(updatedBy, that.updatedBy)
&& Objects.equals(version, that.version);
}
@Override
public int hashCode() {
- return Objects.hash(comment, fullName, version);
+ return Objects.hash(
+ aliases,
+ catalogName,
+ comment,
+ createdAt,
+ createdBy,
+ fullName,
+ id,
+ metastoreId,
+ modelName,
+ modelVersionDependencies,
+ runId,
+ runWorkspaceId,
+ schemaName,
+ source,
+ status,
+ storageLocation,
+ updatedAt,
+ updatedBy,
+ version);
}
@Override
public String toString() {
return new ToStringer(UpdateModelVersionRequest.class)
+ .add("aliases", aliases)
+ .add("catalogName", catalogName)
.add("comment", comment)
+ .add("createdAt", createdAt)
+ .add("createdBy", createdBy)
.add("fullName", fullName)
+ .add("id", id)
+ .add("metastoreId", metastoreId)
+ .add("modelName", modelName)
+ .add("modelVersionDependencies", modelVersionDependencies)
+ .add("runId", runId)
+ .add("runWorkspaceId", runWorkspaceId)
+ .add("schemaName", schemaName)
+ .add("source", source)
+ .add("status", status)
+ .add("storageLocation", storageLocation)
+ .add("updatedAt", updatedAt)
+ .add("updatedBy", updatedBy)
.add("version", version)
.toString();
}
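
With this change the update request mirrors the full model-version shape. A short sketch of how it composes, assuming `RegisteredModelAlias` exposes the usual generated `setAliasName` setter; names and values are illustrative:

```java
import java.util.Arrays;

UpdateModelVersionRequest req =
    new UpdateModelVersionRequest()
        .setFullName("main.ml.my_model") // @JsonIgnore path parameter
        .setVersion(3L)                  // @JsonIgnore path parameter
        .setComment("validated against holdout set")
        .setAliases(Arrays.asList(new RegisteredModelAlias().setAliasName("champion")));
```
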
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateRegisteredModelRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateRegisteredModelRequest.java
index d7fdfe4b0..963ea7bc6 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateRegisteredModelRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/UpdateRegisteredModelRequest.java
@@ -6,17 +6,49 @@
import com.databricks.sdk.support.ToStringer;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Collection;
import java.util.Objects;
@Generated
public class UpdateRegisteredModelRequest {
+ /** List of aliases associated with the registered model */
+ @JsonProperty("aliases")
+ private Collection<RegisteredModelAlias> aliases;
+
+ /**
+ * Indicates whether the principal is limited to retrieving metadata for the associated object
+ * through the BROWSE privilege when include_browse is enabled in the request.
+ */
+ @JsonProperty("browse_only")
+ private Boolean browseOnly;
+
+ /** The name of the catalog where the schema and the registered model reside */
+ @JsonProperty("catalog_name")
+ private String catalogName;
+
/** The comment attached to the registered model */
@JsonProperty("comment")
private String comment;
+ /** Creation timestamp of the registered model in milliseconds since the Unix epoch */
+ @JsonProperty("created_at")
+ private Long createdAt;
+
+ /** The identifier of the user who created the registered model */
+ @JsonProperty("created_by")
+ private String createdBy;
+
/** The three-level (fully qualified) name of the registered model */
@JsonIgnore private String fullName;
+ /** The unique identifier of the metastore */
+ @JsonProperty("metastore_id")
+ private String metastoreId;
+
+ /** The name of the registered model */
+ @JsonProperty("name")
+ private String name;
+
/** New name for the registered model. */
@JsonProperty("new_name")
private String newName;
@@ -25,6 +57,49 @@ public class UpdateRegisteredModelRequest {
@JsonProperty("owner")
private String owner;
+ /** The name of the schema where the registered model resides */
+ @JsonProperty("schema_name")
+ private String schemaName;
+
+ /** The storage location on the cloud under which model version data files are stored */
+ @JsonProperty("storage_location")
+ private String storageLocation;
+
+ /** Last-update timestamp of the registered model in milliseconds since the Unix epoch */
+ @JsonProperty("updated_at")
+ private Long updatedAt;
+
+ /** The identifier of the user who updated the registered model last time */
+ @JsonProperty("updated_by")
+ private String updatedBy;
+
+ public UpdateRegisteredModelRequest setAliases(Collection<RegisteredModelAlias> aliases) {
+ this.aliases = aliases;
+ return this;
+ }
+
+ public Collection<RegisteredModelAlias> getAliases() {
+ return aliases;
+ }
+
+ public UpdateRegisteredModelRequest setBrowseOnly(Boolean browseOnly) {
+ this.browseOnly = browseOnly;
+ return this;
+ }
+
+ public Boolean getBrowseOnly() {
+ return browseOnly;
+ }
+
+ public UpdateRegisteredModelRequest setCatalogName(String catalogName) {
+ this.catalogName = catalogName;
+ return this;
+ }
+
+ public String getCatalogName() {
+ return catalogName;
+ }
+
public UpdateRegisteredModelRequest setComment(String comment) {
this.comment = comment;
return this;
@@ -34,6 +109,24 @@ public String getComment() {
return comment;
}
+ public UpdateRegisteredModelRequest setCreatedAt(Long createdAt) {
+ this.createdAt = createdAt;
+ return this;
+ }
+
+ public Long getCreatedAt() {
+ return createdAt;
+ }
+
+ public UpdateRegisteredModelRequest setCreatedBy(String createdBy) {
+ this.createdBy = createdBy;
+ return this;
+ }
+
+ public String getCreatedBy() {
+ return createdBy;
+ }
+
public UpdateRegisteredModelRequest setFullName(String fullName) {
this.fullName = fullName;
return this;
@@ -43,6 +136,24 @@ public String getFullName() {
return fullName;
}
+ public UpdateRegisteredModelRequest setMetastoreId(String metastoreId) {
+ this.metastoreId = metastoreId;
+ return this;
+ }
+
+ public String getMetastoreId() {
+ return metastoreId;
+ }
+
+ public UpdateRegisteredModelRequest setName(String name) {
+ this.name = name;
+ return this;
+ }
+
+ public String getName() {
+ return name;
+ }
+
public UpdateRegisteredModelRequest setNewName(String newName) {
this.newName = newName;
return this;
@@ -61,29 +172,102 @@ public String getOwner() {
return owner;
}
+ public UpdateRegisteredModelRequest setSchemaName(String schemaName) {
+ this.schemaName = schemaName;
+ return this;
+ }
+
+ public String getSchemaName() {
+ return schemaName;
+ }
+
+ public UpdateRegisteredModelRequest setStorageLocation(String storageLocation) {
+ this.storageLocation = storageLocation;
+ return this;
+ }
+
+ public String getStorageLocation() {
+ return storageLocation;
+ }
+
+ public UpdateRegisteredModelRequest setUpdatedAt(Long updatedAt) {
+ this.updatedAt = updatedAt;
+ return this;
+ }
+
+ public Long getUpdatedAt() {
+ return updatedAt;
+ }
+
+ public UpdateRegisteredModelRequest setUpdatedBy(String updatedBy) {
+ this.updatedBy = updatedBy;
+ return this;
+ }
+
+ public String getUpdatedBy() {
+ return updatedBy;
+ }
+
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
UpdateRegisteredModelRequest that = (UpdateRegisteredModelRequest) o;
- return Objects.equals(comment, that.comment)
+ return Objects.equals(aliases, that.aliases)
+ && Objects.equals(browseOnly, that.browseOnly)
+ && Objects.equals(catalogName, that.catalogName)
+ && Objects.equals(comment, that.comment)
+ && Objects.equals(createdAt, that.createdAt)
+ && Objects.equals(createdBy, that.createdBy)
&& Objects.equals(fullName, that.fullName)
+ && Objects.equals(metastoreId, that.metastoreId)
+ && Objects.equals(name, that.name)
&& Objects.equals(newName, that.newName)
- && Objects.equals(owner, that.owner);
+ && Objects.equals(owner, that.owner)
+ && Objects.equals(schemaName, that.schemaName)
+ && Objects.equals(storageLocation, that.storageLocation)
+ && Objects.equals(updatedAt, that.updatedAt)
+ && Objects.equals(updatedBy, that.updatedBy);
}
@Override
public int hashCode() {
- return Objects.hash(comment, fullName, newName, owner);
+ return Objects.hash(
+ aliases,
+ browseOnly,
+ catalogName,
+ comment,
+ createdAt,
+ createdBy,
+ fullName,
+ metastoreId,
+ name,
+ newName,
+ owner,
+ schemaName,
+ storageLocation,
+ updatedAt,
+ updatedBy);
}
@Override
public String toString() {
return new ToStringer(UpdateRegisteredModelRequest.class)
+ .add("aliases", aliases)
+ .add("browseOnly", browseOnly)
+ .add("catalogName", catalogName)
.add("comment", comment)
+ .add("createdAt", createdAt)
+ .add("createdBy", createdBy)
.add("fullName", fullName)
+ .add("metastoreId", metastoreId)
+ .add("name", name)
.add("newName", newName)
.add("owner", owner)
+ .add("schemaName", schemaName)
+ .add("storageLocation", storageLocation)
+ .add("updatedAt", updatedAt)
+ .add("updatedBy", updatedBy)
.toString();
}
}
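
A minimal sketch of the rename-and-handoff path through this request; `fullName` stays the `@JsonIgnore` path parameter while `new_name` and `owner` travel in the body (values are placeholders):

```java
UpdateRegisteredModelRequest req =
    new UpdateRegisteredModelRequest()
        .setFullName("main.ml.my_model") // @JsonIgnore path parameter
        .setNewName("my_model_v2")
        .setOwner("someone@example.com");
```
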
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumeInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumeInfo.java
index 21ac9b83e..3f74f8647 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumeInfo.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumeInfo.java
@@ -76,7 +76,13 @@ public class VolumeInfo {
@JsonProperty("volume_id")
private String volumeId;
- /** */
+ /**
+ * The type of the volume. An external volume is located in the specified external location. A
+ * managed volume is located in the default location which is specified by the parent schema, or
+ * the parent catalog, or the Metastore. [Learn more]
+ *
+ * [Learn more]: https://docs.databricks.com/aws/en/volumes/managed-vs-external
+ */
@JsonProperty("volume_type")
private VolumeType volumeType;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumeType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumeType.java
index fcb9f83ac..044f72a39 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumeType.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumeType.java
@@ -4,13 +4,6 @@
import com.databricks.sdk.support.Generated;
-/**
- * The type of the volume. An external volume is located in the specified external location. A
- * managed volume is located in the default location which is specified by the parent schema, or the
- * parent catalog, or the Metastore. [Learn more]
- *
- * [Learn more]: https://docs.databricks.com/aws/en/volumes/managed-vs-external
- */
@Generated
public enum VolumeType {
EXTERNAL,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumesAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumesAPI.java
index 609ef16ba..7b0ea8f0a 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumesAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumesAPI.java
@@ -78,7 +78,7 @@ public Iterable list(String catalogName, String schemaName) {
*
* The returned volumes are filtered based on the privileges of the calling user. For example,
* the metastore admin is able to list all the volumes. A regular user needs to be the owner or
- * have the **READ VOLUME** privilege on the volume to recieve the volumes in the response. For
+ * have the **READ VOLUME** privilege on the volume to receive the volumes in the response. For
* the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the
* parent catalog and the **USE_SCHEMA** privilege on the parent schema.
*
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumesService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumesService.java
index fe725c7ef..7ff906c48 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumesService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/catalog/VolumesService.java
@@ -52,7 +52,7 @@ public interface VolumesService {
*
*
* The returned volumes are filtered based on the privileges of the calling user. For example,
* the metastore admin is able to list all the volumes. A regular user needs to be the owner or
- * have the **READ VOLUME** privilege on the volume to recieve the volumes in the response. For
+ * have the **READ VOLUME** privilege on the volume to receive the volumes in the response. For
* the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the
* parent catalog and the **USE_SCHEMA** privilege on the parent schema.
*
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/Environment.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/Environment.java
index 28acb8090..7e131ef28 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/Environment.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/Environment.java
@@ -36,13 +36,9 @@ public class Environment {
@JsonProperty("environment_version")
private String environmentVersion;
- /** Use `java_dependencies` instead. */
- @JsonProperty("jar_dependencies")
- private Collection<String> jarDependencies;
-
/**
- * List of jar dependencies, should be string representing volume paths. For example:
- * `/Volumes/path/to/test.jar`.
+ * List of java dependencies. Each dependency is a string representing a java library path. For
+ * example: `/Volumes/path/to/test.jar`.
*/
@JsonProperty("java_dependencies")
private Collection<String> javaDependencies;
@@ -74,15 +70,6 @@ public String getEnvironmentVersion() {
return environmentVersion;
}
- public Environment setJarDependencies(Collection<String> jarDependencies) {
- this.jarDependencies = jarDependencies;
- return this;
- }
-
- public Collection<String> getJarDependencies() {
- return jarDependencies;
- }
-
public Environment setJavaDependencies(Collection<String> javaDependencies) {
this.javaDependencies = javaDependencies;
return this;
@@ -100,14 +87,12 @@ public boolean equals(Object o) {
return Objects.equals(client, that.client)
&& Objects.equals(dependencies, that.dependencies)
&& Objects.equals(environmentVersion, that.environmentVersion)
- && Objects.equals(jarDependencies, that.jarDependencies)
&& Objects.equals(javaDependencies, that.javaDependencies);
}
@Override
public int hashCode() {
- return Objects.hash(
- client, dependencies, environmentVersion, jarDependencies, javaDependencies);
+ return Objects.hash(client, dependencies, environmentVersion, javaDependencies);
}
@Override
@@ -116,7 +101,6 @@ public String toString() {
.add("client", client)
.add("dependencies", dependencies)
.add("environmentVersion", environmentVersion)
- .add("jarDependencies", jarDependencies)
.add("javaDependencies", javaDependencies)
.toString();
}
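
Since the deprecated `jar_dependencies` field is removed here, callers migrate by mapping the old `setJarDependencies` calls one-to-one onto `setJavaDependencies`; the values are still volume paths to jar files. A sketch (the version string and path are illustrative):

```java
import java.util.Arrays;

Environment env =
    new Environment()
        .setEnvironmentVersion("2") // illustrative version string
        .setJavaDependencies(Arrays.asList("/Volumes/path/to/test.jar"));
```
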
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/TerminationReasonCode.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/TerminationReasonCode.java
index 18917be99..2dcf7125a 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/TerminationReasonCode.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/TerminationReasonCode.java
@@ -45,6 +45,7 @@ public enum TerminationReasonCode {
BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG,
BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED,
BUDGET_POLICY_RESOLUTION_FAILURE,
+ CLOUD_ACCOUNT_POD_QUOTA_EXCEEDED,
CLOUD_ACCOUNT_SETUP_FAILURE,
CLOUD_OPERATION_CANCELLED,
CLOUD_PROVIDER_DISK_SETUP_FAILURE,
@@ -122,6 +123,7 @@ public enum TerminationReasonCode {
IN_PENALTY_BOX,
IP_EXHAUSTION_FAILURE,
JOB_FINISHED,
+ K8S_ACTIVE_POD_QUOTA_EXCEEDED,
K8S_AUTOSCALING_FAILURE,
K8S_DBR_CLUSTER_LAUNCH_TIMEOUT,
LAZY_ALLOCATION_TIMEOUT,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/GenieQueryAttachment.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/GenieQueryAttachment.java
index 5ad10ce62..cc0a48ab5 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/GenieQueryAttachment.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/GenieQueryAttachment.java
@@ -5,6 +5,7 @@
import com.databricks.sdk.support.Generated;
import com.databricks.sdk.support.ToStringer;
import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Collection;
import java.util.Objects;
@Generated
@@ -21,6 +22,10 @@ public class GenieQueryAttachment {
@JsonProperty("last_updated_timestamp")
private Long lastUpdatedTimestamp;
+ /** */
+ @JsonProperty("parameters")
+ private Collection<QueryAttachmentParameter> parameters;
+
/** AI generated SQL query */
@JsonProperty("query")
private String query;
@@ -67,6 +72,15 @@ public Long getLastUpdatedTimestamp() {
return lastUpdatedTimestamp;
}
+ public GenieQueryAttachment setParameters(Collection<QueryAttachmentParameter> parameters) {
+ this.parameters = parameters;
+ return this;
+ }
+
+ public Collection<QueryAttachmentParameter> getParameters() {
+ return parameters;
+ }
+
public GenieQueryAttachment setQuery(String query) {
this.query = query;
return this;
@@ -111,6 +125,7 @@ public boolean equals(Object o) {
return Objects.equals(description, that.description)
&& Objects.equals(id, that.id)
&& Objects.equals(lastUpdatedTimestamp, that.lastUpdatedTimestamp)
+ && Objects.equals(parameters, that.parameters)
&& Objects.equals(query, that.query)
&& Objects.equals(queryResultMetadata, that.queryResultMetadata)
&& Objects.equals(statementId, that.statementId)
@@ -120,7 +135,14 @@ public boolean equals(Object o) {
@Override
public int hashCode() {
return Objects.hash(
- description, id, lastUpdatedTimestamp, query, queryResultMetadata, statementId, title);
+ description,
+ id,
+ lastUpdatedTimestamp,
+ parameters,
+ query,
+ queryResultMetadata,
+ statementId,
+ title);
}
@Override
@@ -129,6 +151,7 @@ public String toString() {
.add("description", description)
.add("id", id)
.add("lastUpdatedTimestamp", lastUpdatedTimestamp)
+ .add("parameters", parameters)
.add("query", query)
.add("queryResultMetadata", queryResultMetadata)
.add("statementId", statementId)
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/MessageErrorType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/MessageErrorType.java
index 8b6b10fc7..75e28eb70 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/MessageErrorType.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/MessageErrorType.java
@@ -18,6 +18,7 @@ public enum MessageErrorType {
DESCRIBE_QUERY_INVALID_SQL_ERROR,
DESCRIBE_QUERY_TIMEOUT,
DESCRIBE_QUERY_UNEXPECTED_FAILURE,
+ EXCEEDED_MAX_TOKEN_LENGTH_EXCEPTION,
FUNCTIONS_NOT_AVAILABLE_EXCEPTION,
FUNCTION_ARGUMENTS_INVALID_EXCEPTION,
FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION,
@@ -28,6 +29,9 @@ public enum MessageErrorType {
GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION,
GENERIC_SQL_EXEC_API_CALL_EXCEPTION,
ILLEGAL_PARAMETER_DEFINITION_EXCEPTION,
+ INTERNAL_CATALOG_ASSET_CREATION_FAILED_EXCEPTION,
+ INTERNAL_CATALOG_ASSET_CREATION_ONGOING_EXCEPTION,
+ INTERNAL_CATALOG_ASSET_CREATION_UNSUPPORTED_EXCEPTION,
INTERNAL_CATALOG_MISSING_UC_PATH_EXCEPTION,
INTERNAL_CATALOG_PATH_OVERLAP_EXCEPTION,
INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/QueryAttachmentParameter.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/QueryAttachmentParameter.java
new file mode 100755
index 000000000..e2609ee6a
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/QueryAttachmentParameter.java
@@ -0,0 +1,74 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dashboards;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class QueryAttachmentParameter {
+ /** */
+ @JsonProperty("keyword")
+ private String keyword;
+
+ /** */
+ @JsonProperty("sql_type")
+ private String sqlType;
+
+ /** */
+ @JsonProperty("value")
+ private String value;
+
+ public QueryAttachmentParameter setKeyword(String keyword) {
+ this.keyword = keyword;
+ return this;
+ }
+
+ public String getKeyword() {
+ return keyword;
+ }
+
+ public QueryAttachmentParameter setSqlType(String sqlType) {
+ this.sqlType = sqlType;
+ return this;
+ }
+
+ public String getSqlType() {
+ return sqlType;
+ }
+
+ public QueryAttachmentParameter setValue(String value) {
+ this.value = value;
+ return this;
+ }
+
+ public String getValue() {
+ return value;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ QueryAttachmentParameter that = (QueryAttachmentParameter) o;
+ return Objects.equals(keyword, that.keyword)
+ && Objects.equals(sqlType, that.sqlType)
+ && Objects.equals(value, that.value);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(keyword, sqlType, value);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(QueryAttachmentParameter.class)
+ .add("keyword", keyword)
+ .add("sqlType", sqlType)
+ .add("value", value)
+ .toString();
+ }
+}
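
A sketch of reading the new `parameters` collection off a query attachment; `attachment` is assumed to be a `GenieQueryAttachment` obtained from the Genie message APIs:

```java
if (attachment.getParameters() != null) {
  for (QueryAttachmentParameter p : attachment.getParameters()) {
    // keyword is the parameter name in the SQL, sql_type its SQL type
    System.out.printf("%s (%s) = %s%n", p.getKeyword(), p.getSqlType(), p.getValue());
  }
}
```
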
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/CreateDatabaseInstanceRoleRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/CreateDatabaseInstanceRoleRequest.java
index af69b9394..643688431 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/CreateDatabaseInstanceRoleRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/CreateDatabaseInstanceRoleRequest.java
@@ -3,6 +3,7 @@
package com.databricks.sdk.service.database;
import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.QueryParam;
import com.databricks.sdk.support.ToStringer;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
@@ -10,6 +11,11 @@
@Generated
public class CreateDatabaseInstanceRoleRequest {
+ /** */
+ @JsonIgnore
+ @QueryParam("database_instance_name")
+ private String databaseInstanceName;
+
/** */
@JsonProperty("database_instance_role")
private DatabaseInstanceRole databaseInstanceRole;
@@ -17,6 +23,15 @@ public class CreateDatabaseInstanceRoleRequest {
/** */
@JsonIgnore private String instanceName;
+ public CreateDatabaseInstanceRoleRequest setDatabaseInstanceName(String databaseInstanceName) {
+ this.databaseInstanceName = databaseInstanceName;
+ return this;
+ }
+
+ public String getDatabaseInstanceName() {
+ return databaseInstanceName;
+ }
+
public CreateDatabaseInstanceRoleRequest setDatabaseInstanceRole(
DatabaseInstanceRole databaseInstanceRole) {
this.databaseInstanceRole = databaseInstanceRole;
@@ -41,18 +56,20 @@ public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
CreateDatabaseInstanceRoleRequest that = (CreateDatabaseInstanceRoleRequest) o;
- return Objects.equals(databaseInstanceRole, that.databaseInstanceRole)
+ return Objects.equals(databaseInstanceName, that.databaseInstanceName)
+ && Objects.equals(databaseInstanceRole, that.databaseInstanceRole)
&& Objects.equals(instanceName, that.instanceName);
}
@Override
public int hashCode() {
- return Objects.hash(databaseInstanceRole, instanceName);
+ return Objects.hash(databaseInstanceName, databaseInstanceRole, instanceName);
}
@Override
public String toString() {
return new ToStringer(CreateDatabaseInstanceRoleRequest.class)
+ .add("databaseInstanceName", databaseInstanceName)
.add("databaseInstanceRole", databaseInstanceRole)
.add("instanceName", instanceName)
.toString();
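
A sketch of how the new `database_instance_name` query parameter sits alongside the existing `instanceName` path field and the request body (instance and role names are placeholders):

```java
CreateDatabaseInstanceRoleRequest req =
    new CreateDatabaseInstanceRoleRequest()
        .setInstanceName("my-instance")         // path parameter
        .setDatabaseInstanceName("my-instance") // new query parameter
        .setDatabaseInstanceRole(new DatabaseInstanceRole().setName("analyst"));
```
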
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/CustomTag.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/CustomTag.java
new file mode 100755
index 000000000..61833a248
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/CustomTag.java
@@ -0,0 +1,55 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.database;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class CustomTag {
+ /** The key of the custom tag. */
+ @JsonProperty("key")
+ private String key;
+
+ /** The value of the custom tag. */
+ @JsonProperty("value")
+ private String value;
+
+ public CustomTag setKey(String key) {
+ this.key = key;
+ return this;
+ }
+
+ public String getKey() {
+ return key;
+ }
+
+ public CustomTag setValue(String value) {
+ this.value = value;
+ return this;
+ }
+
+ public String getValue() {
+ return value;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ CustomTag that = (CustomTag) o;
+ return Objects.equals(key, that.key) && Objects.equals(value, that.value);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(key, value);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(CustomTag.class).add("key", key).add("value", value).toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/DatabaseInstance.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/DatabaseInstance.java
index 077608170..f52760290 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/DatabaseInstance.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/DatabaseInstance.java
@@ -29,10 +29,21 @@ public class DatabaseInstance {
@JsonProperty("creator")
private String creator;
+ /**
+ * Custom tags associated with the instance. This field is only included on create and update
+ * responses.
+ */
+ @JsonProperty("custom_tags")
+ private Collection<CustomTag> customTags;
+
/** Deprecated. The sku of the instance; this field will always match the value of capacity. */
@JsonProperty("effective_capacity")
private String effectiveCapacity;
+ /** The recorded custom tags associated with the instance. */
+ @JsonProperty("effective_custom_tags")
+ private Collection<CustomTag> effectiveCustomTags;
+
/** Whether the instance has PG native password login enabled. */
@JsonProperty("effective_enable_pg_native_login")
private Boolean effectiveEnablePgNativeLogin;
@@ -59,6 +70,10 @@ public class DatabaseInstance {
@JsonProperty("effective_stopped")
private Boolean effectiveStopped;
+ /** The policy that is applied to the instance. */
+ @JsonProperty("effective_usage_policy_id")
+ private String effectiveUsagePolicyId;
+
/** Whether to enable PG native password login on the instance. Defaults to false. */
@JsonProperty("enable_pg_native_login")
private Boolean enablePgNativeLogin;
@@ -121,6 +136,10 @@ public class DatabaseInstance {
@JsonProperty("uid")
private String uid;
+ /** The desired usage policy to associate with the instance. */
+ @JsonProperty("usage_policy_id")
+ private String usagePolicyId;
+
public DatabaseInstance setCapacity(String capacity) {
this.capacity = capacity;
return this;
@@ -157,6 +176,15 @@ public String getCreator() {
return creator;
}
+ public DatabaseInstance setCustomTags(Collection<CustomTag> customTags) {
+ this.customTags = customTags;
+ return this;
+ }
+
+ public Collection<CustomTag> getCustomTags() {
+ return customTags;
+ }
+
public DatabaseInstance setEffectiveCapacity(String effectiveCapacity) {
this.effectiveCapacity = effectiveCapacity;
return this;
@@ -166,6 +194,15 @@ public String getEffectiveCapacity() {
return effectiveCapacity;
}
+ public DatabaseInstance setEffectiveCustomTags(Collection<CustomTag> effectiveCustomTags) {
+ this.effectiveCustomTags = effectiveCustomTags;
+ return this;
+ }
+
+ public Collection<CustomTag> getEffectiveCustomTags() {
+ return effectiveCustomTags;
+ }
+
public DatabaseInstance setEffectiveEnablePgNativeLogin(Boolean effectiveEnablePgNativeLogin) {
this.effectiveEnablePgNativeLogin = effectiveEnablePgNativeLogin;
return this;
@@ -212,6 +249,15 @@ public Boolean getEffectiveStopped() {
return effectiveStopped;
}
+ public DatabaseInstance setEffectiveUsagePolicyId(String effectiveUsagePolicyId) {
+ this.effectiveUsagePolicyId = effectiveUsagePolicyId;
+ return this;
+ }
+
+ public String getEffectiveUsagePolicyId() {
+ return effectiveUsagePolicyId;
+ }
+
public DatabaseInstance setEnablePgNativeLogin(Boolean enablePgNativeLogin) {
this.enablePgNativeLogin = enablePgNativeLogin;
return this;
@@ -320,6 +366,15 @@ public String getUid() {
return uid;
}
+ public DatabaseInstance setUsagePolicyId(String usagePolicyId) {
+ this.usagePolicyId = usagePolicyId;
+ return this;
+ }
+
+ public String getUsagePolicyId() {
+ return usagePolicyId;
+ }
+
@Override
public boolean equals(Object o) {
if (this == o) return true;
@@ -329,13 +384,16 @@ public boolean equals(Object o) {
&& Objects.equals(childInstanceRefs, that.childInstanceRefs)
&& Objects.equals(creationTime, that.creationTime)
&& Objects.equals(creator, that.creator)
+ && Objects.equals(customTags, that.customTags)
&& Objects.equals(effectiveCapacity, that.effectiveCapacity)
+ && Objects.equals(effectiveCustomTags, that.effectiveCustomTags)
&& Objects.equals(effectiveEnablePgNativeLogin, that.effectiveEnablePgNativeLogin)
&& Objects.equals(
effectiveEnableReadableSecondaries, that.effectiveEnableReadableSecondaries)
&& Objects.equals(effectiveNodeCount, that.effectiveNodeCount)
&& Objects.equals(effectiveRetentionWindowInDays, that.effectiveRetentionWindowInDays)
&& Objects.equals(effectiveStopped, that.effectiveStopped)
+ && Objects.equals(effectiveUsagePolicyId, that.effectiveUsagePolicyId)
&& Objects.equals(enablePgNativeLogin, that.enablePgNativeLogin)
&& Objects.equals(enableReadableSecondaries, that.enableReadableSecondaries)
&& Objects.equals(name, that.name)
@@ -347,7 +405,8 @@ public boolean equals(Object o) {
&& Objects.equals(retentionWindowInDays, that.retentionWindowInDays)
&& Objects.equals(state, that.state)
&& Objects.equals(stopped, that.stopped)
- && Objects.equals(uid, that.uid);
+ && Objects.equals(uid, that.uid)
+ && Objects.equals(usagePolicyId, that.usagePolicyId);
}
@Override
@@ -357,12 +416,15 @@ public int hashCode() {
childInstanceRefs,
creationTime,
creator,
+ customTags,
effectiveCapacity,
+ effectiveCustomTags,
effectiveEnablePgNativeLogin,
effectiveEnableReadableSecondaries,
effectiveNodeCount,
effectiveRetentionWindowInDays,
effectiveStopped,
+ effectiveUsagePolicyId,
enablePgNativeLogin,
enableReadableSecondaries,
name,
@@ -374,7 +436,8 @@ public int hashCode() {
retentionWindowInDays,
state,
stopped,
- uid);
+ uid,
+ usagePolicyId);
}
@Override
@@ -384,12 +447,15 @@ public String toString() {
.add("childInstanceRefs", childInstanceRefs)
.add("creationTime", creationTime)
.add("creator", creator)
+ .add("customTags", customTags)
.add("effectiveCapacity", effectiveCapacity)
+ .add("effectiveCustomTags", effectiveCustomTags)
.add("effectiveEnablePgNativeLogin", effectiveEnablePgNativeLogin)
.add("effectiveEnableReadableSecondaries", effectiveEnableReadableSecondaries)
.add("effectiveNodeCount", effectiveNodeCount)
.add("effectiveRetentionWindowInDays", effectiveRetentionWindowInDays)
.add("effectiveStopped", effectiveStopped)
+ .add("effectiveUsagePolicyId", effectiveUsagePolicyId)
.add("enablePgNativeLogin", enablePgNativeLogin)
.add("enableReadableSecondaries", enableReadableSecondaries)
.add("name", name)
@@ -402,6 +468,7 @@ public String toString() {
.add("state", state)
.add("stopped", stopped)
.add("uid", uid)
+ .add("usagePolicyId", usagePolicyId)
.toString();
}
}
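
A sketch of attaching the new custom tags and usage policy on create/update; tag keys, values, capacity, and the policy id below are placeholders. Note the `effective_*` counterparts are read back from responses rather than set by the caller:

```java
import java.util.Arrays;

DatabaseInstance instance =
    new DatabaseInstance()
        .setName("orders-db")
        .setCapacity("CU_2")            // placeholder capacity value
        .setUsagePolicyId("policy-123") // placeholder policy id
        .setCustomTags(
            Arrays.asList(new CustomTag().setKey("team").setValue("data-platform")));
```
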
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/DatabaseInstanceRole.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/DatabaseInstanceRole.java
index cc9b99b4c..9f0c7010a 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/DatabaseInstanceRole.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/database/DatabaseInstanceRole.java
@@ -10,14 +10,22 @@
/** A DatabaseInstanceRole represents a Postgres role in a database instance. */
@Generated
public class DatabaseInstanceRole {
- /** API-exposed Postgres role attributes */
+ /** The desired API-exposed Postgres role attribute to associate with the role. Optional. */
@JsonProperty("attributes")
private DatabaseInstanceRoleAttributes attributes;
+ /** The attributes that are applied to the role. */
+ @JsonProperty("effective_attributes")
+ private DatabaseInstanceRoleAttributes effectiveAttributes;
+
/** The type of the role. */
@JsonProperty("identity_type")
private DatabaseInstanceRoleIdentityType identityType;
+ /** */
+ @JsonProperty("instance_name")
+ private String instanceName;
+
/** An enum value for a standard role that this role is a member of. */
@JsonProperty("membership_role")
private DatabaseInstanceRoleMembershipRole membershipRole;
@@ -35,6 +43,16 @@ public DatabaseInstanceRoleAttributes getAttributes() {
return attributes;
}
+ public DatabaseInstanceRole setEffectiveAttributes(
+ DatabaseInstanceRoleAttributes effectiveAttributes) {
+ this.effectiveAttributes = effectiveAttributes;
+ return this;
+ }
+
+ public DatabaseInstanceRoleAttributes getEffectiveAttributes() {
+ return effectiveAttributes;
+ }
+
public DatabaseInstanceRole setIdentityType(DatabaseInstanceRoleIdentityType identityType) {
this.identityType = identityType;
return this;
@@ -44,6 +62,15 @@ public DatabaseInstanceRoleIdentityType getIdentityType() {
return identityType;
}
+ public DatabaseInstanceRole setInstanceName(String instanceName) {
+ this.instanceName = instanceName;
+ return this;
+ }
+
+ public String getInstanceName() {
+ return instanceName;
+ }
+
public DatabaseInstanceRole setMembershipRole(DatabaseInstanceRoleMembershipRole membershipRole) {
this.membershipRole = membershipRole;
return this;
@@ -68,21 +95,26 @@ public boolean equals(Object o) {
if (o == null || getClass() != o.getClass()) return false;
DatabaseInstanceRole that = (DatabaseInstanceRole) o;
return Objects.equals(attributes, that.attributes)
+ && Objects.equals(effectiveAttributes, that.effectiveAttributes)
&& Objects.equals(identityType, that.identityType)
+ && Objects.equals(instanceName, that.instanceName)
&& Objects.equals(membershipRole, that.membershipRole)
&& Objects.equals(name, that.name);
}
@Override
public int hashCode() {
- return Objects.hash(attributes, identityType, membershipRole, name);
+ return Objects.hash(
+ attributes, effectiveAttributes, identityType, instanceName, membershipRole, name);
}
@Override
public String toString() {
return new ToStringer(DatabaseInstanceRole.class)
.add("attributes", attributes)
+ .add("effectiveAttributes", effectiveAttributes)
.add("identityType", identityType)
+ .add("instanceName", instanceName)
.add("membershipRole", membershipRole)
.add("name", name)
.toString();
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/AggregationGranularity.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/AggregationGranularity.java
new file mode 100755
index 000000000..1bff8d647
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/AggregationGranularity.java
@@ -0,0 +1,20 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+
+/** The granularity for aggregating data into time windows based on their timestamp. */
+@Generated
+public enum AggregationGranularity {
+ AGGREGATION_GRANULARITY_1_DAY,
+ AGGREGATION_GRANULARITY_1_HOUR,
+ AGGREGATION_GRANULARITY_1_MONTH,
+ AGGREGATION_GRANULARITY_1_WEEK,
+ AGGREGATION_GRANULARITY_1_YEAR,
+ AGGREGATION_GRANULARITY_2_WEEKS,
+ AGGREGATION_GRANULARITY_30_MINUTES,
+ AGGREGATION_GRANULARITY_3_WEEKS,
+ AGGREGATION_GRANULARITY_4_WEEKS,
+ AGGREGATION_GRANULARITY_5_MINUTES,
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/AnomalyDetectionConfig.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/AnomalyDetectionConfig.java
new file mode 100755
index 000000000..6fd6e421c
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/AnomalyDetectionConfig.java
@@ -0,0 +1,29 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import java.util.Objects;
+
+/** Anomaly Detection Configurations. */
+@Generated
+public class AnomalyDetectionConfig {
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash();
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(AnomalyDetectionConfig.class).toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CancelRefreshRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CancelRefreshRequest.java
new file mode 100755
index 000000000..4a6c289a5
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CancelRefreshRequest.java
@@ -0,0 +1,72 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import java.util.Objects;
+
+/** Request to cancel a refresh. */
+@Generated
+public class CancelRefreshRequest {
+ /** The UUID of the request object. For example, schema id. */
+ @JsonIgnore private String objectId;
+
+ /** The type of the monitored object. Can be one of the following: schema or table. */
+ @JsonIgnore private String objectType;
+
+ /** Unique id of the refresh operation. */
+ @JsonIgnore private Long refreshId;
+
+ public CancelRefreshRequest setObjectId(String objectId) {
+ this.objectId = objectId;
+ return this;
+ }
+
+ public String getObjectId() {
+ return objectId;
+ }
+
+ public CancelRefreshRequest setObjectType(String objectType) {
+ this.objectType = objectType;
+ return this;
+ }
+
+ public String getObjectType() {
+ return objectType;
+ }
+
+ public CancelRefreshRequest setRefreshId(Long refreshId) {
+ this.refreshId = refreshId;
+ return this;
+ }
+
+ public Long getRefreshId() {
+ return refreshId;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ CancelRefreshRequest that = (CancelRefreshRequest) o;
+ return Objects.equals(objectId, that.objectId)
+ && Objects.equals(objectType, that.objectType)
+ && Objects.equals(refreshId, that.refreshId);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(objectId, objectType, refreshId);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(CancelRefreshRequest.class)
+ .add("objectId", objectId)
+ .add("objectType", objectType)
+ .add("refreshId", refreshId)
+ .toString();
+ }
+}
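
A sketch of identifying the refresh to cancel; `objectType` takes the string literals documented on the field ("schema" or "table"), and the UUID below is a placeholder:

```java
CancelRefreshRequest cancel =
    new CancelRefreshRequest()
        .setObjectType("table")
        .setObjectId("11111111-2222-3333-4444-555555555555") // placeholder UUID
        .setRefreshId(42L);
```
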
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CancelRefreshResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CancelRefreshResponse.java
new file mode 100755
index 000000000..3f81c4845
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CancelRefreshResponse.java
@@ -0,0 +1,43 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+/** Response to cancelling a refresh. */
+@Generated
+public class CancelRefreshResponse {
+ /** The refresh to cancel. */
+ @JsonProperty("refresh")
+ private Refresh refresh;
+
+ public CancelRefreshResponse setRefresh(Refresh refresh) {
+ this.refresh = refresh;
+ return this;
+ }
+
+ public Refresh getRefresh() {
+ return refresh;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ CancelRefreshResponse that = (CancelRefreshResponse) o;
+ return Objects.equals(refresh, that.refresh);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(refresh);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(CancelRefreshResponse.class).add("refresh", refresh).toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CreateMonitorRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CreateMonitorRequest.java
new file mode 100755
index 000000000..b98cb431c
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CreateMonitorRequest.java
@@ -0,0 +1,42 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class CreateMonitorRequest {
+ /** The monitor to create. */
+ @JsonProperty("monitor")
+ private Monitor monitor;
+
+ public CreateMonitorRequest setMonitor(Monitor monitor) {
+ this.monitor = monitor;
+ return this;
+ }
+
+ public Monitor getMonitor() {
+ return monitor;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ CreateMonitorRequest that = (CreateMonitorRequest) o;
+ return Objects.equals(monitor, that.monitor);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(monitor);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(CreateMonitorRequest.class).add("monitor", monitor).toString();
+ }
+}
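
The request is a thin wrapper; the `Monitor` payload itself is defined elsewhere in this package and its fields are not shown in this hunk:

```java
// Sketch only: the Monitor would be populated with the fields defined
// elsewhere in the dataquality package.
CreateMonitorRequest create = new CreateMonitorRequest().setMonitor(new Monitor());
```
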
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CreateRefreshRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CreateRefreshRequest.java
new file mode 100755
index 000000000..2c8422332
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CreateRefreshRequest.java
@@ -0,0 +1,73 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class CreateRefreshRequest {
+ /** The UUID of the request object. For example, table id. */
+ @JsonIgnore private String objectId;
+
+ /** The type of the monitored object. Can be one of the following: table. */
+ @JsonIgnore private String objectType;
+
+ /** The refresh to create */
+ @JsonProperty("refresh")
+ private Refresh refresh;
+
+ public CreateRefreshRequest setObjectId(String objectId) {
+ this.objectId = objectId;
+ return this;
+ }
+
+ public String getObjectId() {
+ return objectId;
+ }
+
+ public CreateRefreshRequest setObjectType(String objectType) {
+ this.objectType = objectType;
+ return this;
+ }
+
+ public String getObjectType() {
+ return objectType;
+ }
+
+ public CreateRefreshRequest setRefresh(Refresh refresh) {
+ this.refresh = refresh;
+ return this;
+ }
+
+ public Refresh getRefresh() {
+ return refresh;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ CreateRefreshRequest that = (CreateRefreshRequest) o;
+ return Objects.equals(objectId, that.objectId)
+ && Objects.equals(objectType, that.objectType)
+ && Objects.equals(refresh, that.refresh);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(objectId, objectType, refresh);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(CreateRefreshRequest.class)
+ .add("objectId", objectId)
+ .add("objectType", objectType)
+ .add("refresh", refresh)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CronSchedule.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CronSchedule.java
new file mode 100755
index 000000000..72f07e684
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CronSchedule.java
@@ -0,0 +1,85 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+/** The data quality monitoring workflow cron schedule. */
+@Generated
+public class CronSchedule {
+ /** Read-only field that indicates whether the schedule is paused. */
+ @JsonProperty("pause_status")
+ private CronSchedulePauseStatus pauseStatus;
+
+ /**
+ * The expression that determines when to run the monitor. See [examples].
+ *
+ * [examples]:
+ * https://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html
+ */
+ @JsonProperty("quartz_cron_expression")
+ private String quartzCronExpression;
+
+ /**
+ * A Java timezone id. The schedule for a job will be resolved with respect to this timezone. See
+ * `Java TimeZone <https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html>`_ for
+ * details. The timezone id (e.g., ``America/Los_Angeles``) in which to evaluate the quartz
+ * expression.
+ */
+ @JsonProperty("timezone_id")
+ private String timezoneId;
+
+ public CronSchedule setPauseStatus(CronSchedulePauseStatus pauseStatus) {
+ this.pauseStatus = pauseStatus;
+ return this;
+ }
+
+ public CronSchedulePauseStatus getPauseStatus() {
+ return pauseStatus;
+ }
+
+ public CronSchedule setQuartzCronExpression(String quartzCronExpression) {
+ this.quartzCronExpression = quartzCronExpression;
+ return this;
+ }
+
+ public String getQuartzCronExpression() {
+ return quartzCronExpression;
+ }
+
+ public CronSchedule setTimezoneId(String timezoneId) {
+ this.timezoneId = timezoneId;
+ return this;
+ }
+
+ public String getTimezoneId() {
+ return timezoneId;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ CronSchedule that = (CronSchedule) o;
+ return Objects.equals(pauseStatus, that.pauseStatus)
+ && Objects.equals(quartzCronExpression, that.quartzCronExpression)
+ && Objects.equals(timezoneId, that.timezoneId);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(pauseStatus, quartzCronExpression, timezoneId);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(CronSchedule.class)
+ .add("pauseStatus", pauseStatus)
+ .add("quartzCronExpression", quartzCronExpression)
+ .add("timezoneId", timezoneId)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CronSchedulePauseStatus.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CronSchedulePauseStatus.java
new file mode 100755
index 000000000..55d5db475
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/CronSchedulePauseStatus.java
@@ -0,0 +1,12 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+
+/** The data quality monitoring workflow cron schedule pause status. */
+@Generated
+public enum CronSchedulePauseStatus {
+ CRON_SCHEDULE_PAUSE_STATUS_PAUSED,
+ CRON_SCHEDULE_PAUSE_STATUS_UNPAUSED,
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataProfilingConfig.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataProfilingConfig.java
new file mode 100755
index 000000000..24c38e762
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataProfilingConfig.java
@@ -0,0 +1,373 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Collection;
+import java.util.Objects;
+
+/** Data Profiling Configurations. */
+@Generated
+public class DataProfilingConfig {
+ /**
+ * Field for specifying the absolute path to a custom directory to store data-monitoring assets.
+ * Normally prepopulated to a default user location via UI and Python APIs.
+ */
+ @JsonProperty("assets_dir")
+ private String assetsDir;
+
+ /**
+ * Baseline table name. Baseline data is used to compute drift from the data in the monitored
+ * `table_name`. The baseline table and the monitored table shall have the same schema.
+ */
+ @JsonProperty("baseline_table_name")
+ private String baselineTableName;
+
+ /** Custom metrics. */
+ @JsonProperty("custom_metrics")
+ private Collection<DataProfilingCustomMetric> customMetrics;
+
+ /**
+ * Id of dashboard that visualizes the computed metrics. This can be empty if the monitor is in
+ * PENDING state.
+ */
+ @JsonProperty("dashboard_id")
+ private String dashboardId;
+
+ /** Table that stores drift metrics data. Format: `catalog.schema.table_name`. */
+ @JsonProperty("drift_metrics_table_name")
+ private String driftMetricsTableName;
+
+ /** The warehouse for dashboard creation */
+ @JsonProperty("effective_warehouse_id")
+ private String effectiveWarehouseId;
+
+ /** Configuration for monitoring inference log tables. */
+ @JsonProperty("inference_log")
+ private InferenceLogConfig inferenceLog;
+
+ /** The latest error message for a monitor failure. */
+ @JsonProperty("latest_monitor_failure_message")
+ private String latestMonitorFailureMessage;
+
+ /**
+ * Represents the current monitor configuration version in use. The version will be represented in
+ * a numeric fashion (1,2,3...). The field has flexibility to take on negative values, which can
+ * indicate corrupted monitor_version numbers.
+ */
+ @JsonProperty("monitor_version")
+ private Long monitorVersion;
+
+ /** Unity Catalog table to monitor. Format: `catalog.schema.table_name` */
+ @JsonProperty("monitored_table_name")
+ private String monitoredTableName;
+
+ /** Field for specifying notification settings. */
+ @JsonProperty("notification_settings")
+ private NotificationSettings notificationSettings;
+
+ /** ID of the schema where output tables are created. */
+ @JsonProperty("output_schema_id")
+ private String outputSchemaId;
+
+ /** Table that stores profile metrics data. Format: `catalog.schema.table_name`. */
+ @JsonProperty("profile_metrics_table_name")
+ private String profileMetricsTableName;
+
+ /** The cron schedule. */
+ @JsonProperty("schedule")
+ private CronSchedule schedule;
+
+ /** Whether to skip creating a default dashboard summarizing data quality metrics. */
+ @JsonProperty("skip_builtin_dashboard")
+ private Boolean skipBuiltinDashboard;
+
+ /**
+ * List of column expressions to slice data with for targeted analysis. The data is grouped by
+ * each expression independently, resulting in a separate slice for each predicate and its
+ * complements. For example `slicing_exprs=["col_1", "col_2 > 10"]` will generate the following
+ * slices: two slices for `col_2 > 10` (True and False), and one slice per unique value in
+ * `col_1`.
+ * For high-cardinality columns, only the top 100 unique values by frequency will generate slices.
+ */
+ @JsonProperty("slicing_exprs")
+ private Collection<String> slicingExprs;
+
+ /** Configuration for monitoring snapshot tables. */
+ @JsonProperty("snapshot")
+ private SnapshotConfig snapshot;
+
+ /** The data profiling monitor status. */
+ @JsonProperty("status")
+ private DataProfilingStatus status;
+
+ /** Configuration for monitoring time series tables. */
+ @JsonProperty("time_series")
+ private TimeSeriesConfig timeSeries;
+
+ /**
+ * Optional argument to specify the warehouse for dashboard creation. If not specified, the first
+ * running warehouse will be used.
+ */
+ @JsonProperty("warehouse_id")
+ private String warehouseId;
+
+ public DataProfilingConfig setAssetsDir(String assetsDir) {
+ this.assetsDir = assetsDir;
+ return this;
+ }
+
+ public String getAssetsDir() {
+ return assetsDir;
+ }
+
+ public DataProfilingConfig setBaselineTableName(String baselineTableName) {
+ this.baselineTableName = baselineTableName;
+ return this;
+ }
+
+ public String getBaselineTableName() {
+ return baselineTableName;
+ }
+
+ public DataProfilingConfig setCustomMetrics(Collection<DataProfilingCustomMetric> customMetrics) {
+ this.customMetrics = customMetrics;
+ return this;
+ }
+
+ public Collection<DataProfilingCustomMetric> getCustomMetrics() {
+ return customMetrics;
+ }
+
+ public DataProfilingConfig setDashboardId(String dashboardId) {
+ this.dashboardId = dashboardId;
+ return this;
+ }
+
+ public String getDashboardId() {
+ return dashboardId;
+ }
+
+ public DataProfilingConfig setDriftMetricsTableName(String driftMetricsTableName) {
+ this.driftMetricsTableName = driftMetricsTableName;
+ return this;
+ }
+
+ public String getDriftMetricsTableName() {
+ return driftMetricsTableName;
+ }
+
+ public DataProfilingConfig setEffectiveWarehouseId(String effectiveWarehouseId) {
+ this.effectiveWarehouseId = effectiveWarehouseId;
+ return this;
+ }
+
+ public String getEffectiveWarehouseId() {
+ return effectiveWarehouseId;
+ }
+
+ public DataProfilingConfig setInferenceLog(InferenceLogConfig inferenceLog) {
+ this.inferenceLog = inferenceLog;
+ return this;
+ }
+
+ public InferenceLogConfig getInferenceLog() {
+ return inferenceLog;
+ }
+
+ public DataProfilingConfig setLatestMonitorFailureMessage(String latestMonitorFailureMessage) {
+ this.latestMonitorFailureMessage = latestMonitorFailureMessage;
+ return this;
+ }
+
+ public String getLatestMonitorFailureMessage() {
+ return latestMonitorFailureMessage;
+ }
+
+ public DataProfilingConfig setMonitorVersion(Long monitorVersion) {
+ this.monitorVersion = monitorVersion;
+ return this;
+ }
+
+ public Long getMonitorVersion() {
+ return monitorVersion;
+ }
+
+ public DataProfilingConfig setMonitoredTableName(String monitoredTableName) {
+ this.monitoredTableName = monitoredTableName;
+ return this;
+ }
+
+ public String getMonitoredTableName() {
+ return monitoredTableName;
+ }
+
+ public DataProfilingConfig setNotificationSettings(NotificationSettings notificationSettings) {
+ this.notificationSettings = notificationSettings;
+ return this;
+ }
+
+ public NotificationSettings getNotificationSettings() {
+ return notificationSettings;
+ }
+
+ public DataProfilingConfig setOutputSchemaId(String outputSchemaId) {
+ this.outputSchemaId = outputSchemaId;
+ return this;
+ }
+
+ public String getOutputSchemaId() {
+ return outputSchemaId;
+ }
+
+ public DataProfilingConfig setProfileMetricsTableName(String profileMetricsTableName) {
+ this.profileMetricsTableName = profileMetricsTableName;
+ return this;
+ }
+
+ public String getProfileMetricsTableName() {
+ return profileMetricsTableName;
+ }
+
+ public DataProfilingConfig setSchedule(CronSchedule schedule) {
+ this.schedule = schedule;
+ return this;
+ }
+
+ public CronSchedule getSchedule() {
+ return schedule;
+ }
+
+ public DataProfilingConfig setSkipBuiltinDashboard(Boolean skipBuiltinDashboard) {
+ this.skipBuiltinDashboard = skipBuiltinDashboard;
+ return this;
+ }
+
+ public Boolean getSkipBuiltinDashboard() {
+ return skipBuiltinDashboard;
+ }
+
+ public DataProfilingConfig setSlicingExprs(Collection<String> slicingExprs) {
+ this.slicingExprs = slicingExprs;
+ return this;
+ }
+
+ public Collection<String> getSlicingExprs() {
+ return slicingExprs;
+ }
+
+ public DataProfilingConfig setSnapshot(SnapshotConfig snapshot) {
+ this.snapshot = snapshot;
+ return this;
+ }
+
+ public SnapshotConfig getSnapshot() {
+ return snapshot;
+ }
+
+ public DataProfilingConfig setStatus(DataProfilingStatus status) {
+ this.status = status;
+ return this;
+ }
+
+ public DataProfilingStatus getStatus() {
+ return status;
+ }
+
+ public DataProfilingConfig setTimeSeries(TimeSeriesConfig timeSeries) {
+ this.timeSeries = timeSeries;
+ return this;
+ }
+
+ public TimeSeriesConfig getTimeSeries() {
+ return timeSeries;
+ }
+
+ public DataProfilingConfig setWarehouseId(String warehouseId) {
+ this.warehouseId = warehouseId;
+ return this;
+ }
+
+ public String getWarehouseId() {
+ return warehouseId;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ DataProfilingConfig that = (DataProfilingConfig) o;
+ return Objects.equals(assetsDir, that.assetsDir)
+ && Objects.equals(baselineTableName, that.baselineTableName)
+ && Objects.equals(customMetrics, that.customMetrics)
+ && Objects.equals(dashboardId, that.dashboardId)
+ && Objects.equals(driftMetricsTableName, that.driftMetricsTableName)
+ && Objects.equals(effectiveWarehouseId, that.effectiveWarehouseId)
+ && Objects.equals(inferenceLog, that.inferenceLog)
+ && Objects.equals(latestMonitorFailureMessage, that.latestMonitorFailureMessage)
+ && Objects.equals(monitorVersion, that.monitorVersion)
+ && Objects.equals(monitoredTableName, that.monitoredTableName)
+ && Objects.equals(notificationSettings, that.notificationSettings)
+ && Objects.equals(outputSchemaId, that.outputSchemaId)
+ && Objects.equals(profileMetricsTableName, that.profileMetricsTableName)
+ && Objects.equals(schedule, that.schedule)
+ && Objects.equals(skipBuiltinDashboard, that.skipBuiltinDashboard)
+ && Objects.equals(slicingExprs, that.slicingExprs)
+ && Objects.equals(snapshot, that.snapshot)
+ && Objects.equals(status, that.status)
+ && Objects.equals(timeSeries, that.timeSeries)
+ && Objects.equals(warehouseId, that.warehouseId);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(
+ assetsDir,
+ baselineTableName,
+ customMetrics,
+ dashboardId,
+ driftMetricsTableName,
+ effectiveWarehouseId,
+ inferenceLog,
+ latestMonitorFailureMessage,
+ monitorVersion,
+ monitoredTableName,
+ notificationSettings,
+ outputSchemaId,
+ profileMetricsTableName,
+ schedule,
+ skipBuiltinDashboard,
+ slicingExprs,
+ snapshot,
+ status,
+ timeSeries,
+ warehouseId);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(DataProfilingConfig.class)
+ .add("assetsDir", assetsDir)
+ .add("baselineTableName", baselineTableName)
+ .add("customMetrics", customMetrics)
+ .add("dashboardId", dashboardId)
+ .add("driftMetricsTableName", driftMetricsTableName)
+ .add("effectiveWarehouseId", effectiveWarehouseId)
+ .add("inferenceLog", inferenceLog)
+ .add("latestMonitorFailureMessage", latestMonitorFailureMessage)
+ .add("monitorVersion", monitorVersion)
+ .add("monitoredTableName", monitoredTableName)
+ .add("notificationSettings", notificationSettings)
+ .add("outputSchemaId", outputSchemaId)
+ .add("profileMetricsTableName", profileMetricsTableName)
+ .add("schedule", schedule)
+ .add("skipBuiltinDashboard", skipBuiltinDashboard)
+ .add("slicingExprs", slicingExprs)
+ .add("snapshot", snapshot)
+ .add("status", status)
+ .add("timeSeries", timeSeries)
+ .add("warehouseId", warehouseId)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataProfilingCustomMetric.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataProfilingCustomMetric.java
new file mode 100755
index 000000000..0fd468a79
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataProfilingCustomMetric.java
@@ -0,0 +1,115 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Collection;
+import java.util.Objects;
+
+/** Custom metric definition. */
+@Generated
+public class DataProfilingCustomMetric {
+ /**
+ * Jinja template for a SQL expression that specifies how to compute the metric. See [create
+ * metric definition].
+ *
+ * [create metric definition]:
+ * https://docs.databricks.com/en/lakehouse-monitoring/custom-metrics.html#create-definition
+ */
+ @JsonProperty("definition")
+ private String definition;
+
+ /**
+ * A list of column names in the input table the metric should be computed for. Can use
+ * ``":table"`` to indicate that the metric needs information from multiple columns.
+ */
+ @JsonProperty("input_columns")
+ private Collection<String> inputColumns;
+
+ /** Name of the metric in the output tables. */
+ @JsonProperty("name")
+ private String name;
+
+ /** The output type of the custom metric. */
+ @JsonProperty("output_data_type")
+ private String outputDataType;
+
+ /** The type of the custom metric. */
+ @JsonProperty("type")
+ private DataProfilingCustomMetricType typeValue;
+
+ public DataProfilingCustomMetric setDefinition(String definition) {
+ this.definition = definition;
+ return this;
+ }
+
+ public String getDefinition() {
+ return definition;
+ }
+
+ public DataProfilingCustomMetric setInputColumns(Collection<String> inputColumns) {
+ this.inputColumns = inputColumns;
+ return this;
+ }
+
+ public Collection<String> getInputColumns() {
+ return inputColumns;
+ }
+
+ public DataProfilingCustomMetric setName(String name) {
+ this.name = name;
+ return this;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ public DataProfilingCustomMetric setOutputDataType(String outputDataType) {
+ this.outputDataType = outputDataType;
+ return this;
+ }
+
+ public String getOutputDataType() {
+ return outputDataType;
+ }
+
+ public DataProfilingCustomMetric setType(DataProfilingCustomMetricType typeValue) {
+ this.typeValue = typeValue;
+ return this;
+ }
+
+ public DataProfilingCustomMetricType getType() {
+ return typeValue;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ DataProfilingCustomMetric that = (DataProfilingCustomMetric) o;
+ return Objects.equals(definition, that.definition)
+ && Objects.equals(inputColumns, that.inputColumns)
+ && Objects.equals(name, that.name)
+ && Objects.equals(outputDataType, that.outputDataType)
+ && Objects.equals(typeValue, that.typeValue);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(definition, inputColumns, name, outputDataType, typeValue);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(DataProfilingCustomMetric.class)
+ .add("definition", definition)
+ .add("inputColumns", inputColumns)
+ .add("name", name)
+ .add("outputDataType", outputDataType)
+ .add("typeValue", typeValue)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataProfilingCustomMetricType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataProfilingCustomMetricType.java
new file mode 100755
index 000000000..63122073c
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataProfilingCustomMetricType.java
@@ -0,0 +1,13 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+
+/** The custom metric type. */
+@Generated
+public enum DataProfilingCustomMetricType {
+ DATA_PROFILING_CUSTOM_METRIC_TYPE_AGGREGATE,
+ DATA_PROFILING_CUSTOM_METRIC_TYPE_DERIVED,
+ DATA_PROFILING_CUSTOM_METRIC_TYPE_DRIFT,
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataProfilingStatus.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataProfilingStatus.java
new file mode 100755
index 000000000..d49a4406d
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataProfilingStatus.java
@@ -0,0 +1,15 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+
+/** The status of the data profiling monitor. */
+@Generated
+public enum DataProfilingStatus {
+ DATA_PROFILING_STATUS_ACTIVE,
+ DATA_PROFILING_STATUS_DELETE_PENDING,
+ DATA_PROFILING_STATUS_ERROR,
+ DATA_PROFILING_STATUS_FAILED,
+ DATA_PROFILING_STATUS_PENDING,
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataQualityAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataQualityAPI.java
new file mode 100755
index 000000000..0226e6c69
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataQualityAPI.java
@@ -0,0 +1,197 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.core.ApiClient;
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.Paginator;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/** Manage the data quality of Unity Catalog objects (currently supports `schema` and `table`) */
+@Generated
+public class DataQualityAPI {
+ private static final Logger LOG = LoggerFactory.getLogger(DataQualityAPI.class);
+
+ private final DataQualityService impl;
+
+ /** Regular-use constructor */
+ public DataQualityAPI(ApiClient apiClient) {
+ impl = new DataQualityImpl(apiClient);
+ }
+
+ /** Constructor for mocks */
+ public DataQualityAPI(DataQualityService mock) {
+ impl = mock;
+ }
+
+ /**
+ * Cancels a data quality monitor refresh. Currently only supported for the `table` `object_type`.
+ */
+ public CancelRefreshResponse cancelRefresh(CancelRefreshRequest request) {
+ return impl.cancelRefresh(request);
+ }
+
+ /**
+ * Create a data quality monitor on a Unity Catalog object. The caller must provide either
+ * `anomaly_detection_config` for a schema monitor or `data_profiling_config` for a table monitor.
+ *
+ * <p>For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent
+ * catalog, have **USE_SCHEMA** on the table's parent schema, and have **SELECT** access on the
+ * table 2. have **USE_CATALOG** on the table's parent catalog, be an owner of the table's parent
+ * schema, and have **SELECT** access on the table. 3. have the following permissions: -
+ * **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema -
+ * be an owner of the table.
+ *
+ * <p>Workspace assets, such as the dashboard, will be created in the workspace where this call
+ * was made.
+ */
+ public Monitor createMonitor(CreateMonitorRequest request) {
+ return impl.createMonitor(request);
+ }
+
+ /**
+ * Creates a refresh. Currently only supported for the `table` `object_type`.
+ *
+ * <p>The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG**
+ * on the table's parent catalog and be an owner of the table's parent schema 3. have the
+ * following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the
+ * table's parent schema - be an owner of the table
+ */
+ public Refresh createRefresh(CreateRefreshRequest request) {
+ return impl.createRefresh(request);
+ }
+
+ public void deleteMonitor(String objectType, String objectId) {
+ deleteMonitor(new DeleteMonitorRequest().setObjectType(objectType).setObjectId(objectId));
+ }
+
+ /**
+ * Delete a data quality monitor on Unity Catalog object.
+ *
+ * <p>For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent
+ * catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+ * parent schema 3. have the following permissions: - **USE_CATALOG** on the table's parent
+ * catalog - **USE_SCHEMA** on the table's parent schema - be an owner of the table.
+ *
+ * <p>Note that the metric tables and dashboard will not be deleted as part of this call; those
+ * assets must be manually cleaned up (if desired).
+ */
+ public void deleteMonitor(DeleteMonitorRequest request) {
+ impl.deleteMonitor(request);
+ }
+
+ public void deleteRefresh(String objectType, String objectId, long refreshId) {
+ deleteRefresh(
+ new DeleteRefreshRequest()
+ .setObjectType(objectType)
+ .setObjectId(objectId)
+ .setRefreshId(refreshId));
+ }
+
+ /** (Unimplemented) Delete a refresh */
+ public void deleteRefresh(DeleteRefreshRequest request) {
+ impl.deleteRefresh(request);
+ }
+
+ public Monitor getMonitor(String objectType, String objectId) {
+ return getMonitor(new GetMonitorRequest().setObjectType(objectType).setObjectId(objectId));
+ }
+
+ /**
+ * Read a data quality monitor on Unity Catalog object.
+ *
+ * <p>For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent
+ * catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+ * parent schema. 3. have the following permissions: - **USE_CATALOG** on the table's parent
+ * catalog - **USE_SCHEMA** on the table's parent schema - **SELECT** privilege on the table.
+ *
+ * <p>The returned information includes configuration values, as well as information on assets
+ * created by the monitor. Some information (e.g., dashboard) may be filtered out if the caller is
+ * in a different workspace than where the monitor was created.
+ */
+ public Monitor getMonitor(GetMonitorRequest request) {
+ return impl.getMonitor(request);
+ }
+
+ public Refresh getRefresh(String objectType, String objectId, long refreshId) {
+ return getRefresh(
+ new GetRefreshRequest()
+ .setObjectType(objectType)
+ .setObjectId(objectId)
+ .setRefreshId(refreshId));
+ }
+
+ /**
+ * Get data quality monitor refresh.
+ *
+ * <p>For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent
+ * catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+ * parent schema 3. have the following permissions: - **USE_CATALOG** on the table's parent
+ * catalog - **USE_SCHEMA** on the table's parent schema - **SELECT** privilege on the table.
+ */
+ public Refresh getRefresh(GetRefreshRequest request) {
+ return impl.getRefresh(request);
+ }
+
+ /** (Unimplemented) List data quality monitors. */
+ public Iterable<Monitor> listMonitor(ListMonitorRequest request) {
+ return new Paginator<>(
+ request,
+ impl::listMonitor,
+ ListMonitorResponse::getMonitors,
+ response -> {
+ String token = response.getNextPageToken();
+ if (token == null || token.isEmpty()) {
+ return null;
+ }
+ return request.setPageToken(token);
+ });
+ }
+
+ public Iterable<Refresh> listRefresh(String objectType, String objectId) {
+ return listRefresh(new ListRefreshRequest().setObjectType(objectType).setObjectId(objectId));
+ }
+
+ /**
+ * List data quality monitor refreshes.
+ *
+ * <p>For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent
+ * catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+ * parent schema 3. have the following permissions: - **USE_CATALOG** on the table's parent
+ * catalog - **USE_SCHEMA** on the table's parent schema - **SELECT** privilege on the table.
+ */
+ public Iterable<Refresh> listRefresh(ListRefreshRequest request) {
+ return new Paginator<>(
+ request,
+ impl::listRefresh,
+ ListRefreshResponse::getRefreshes,
+ response -> {
+ String token = response.getNextPageToken();
+ if (token == null || token.isEmpty()) {
+ return null;
+ }
+ return request.setPageToken(token);
+ });
+ }
+
+ /**
+ * Update a data quality monitor on Unity Catalog object.
+ *
+ * <p>For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent
+ * catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+ * parent schema 3. have the following permissions: - **USE_CATALOG** on the table's parent
+ * catalog - **USE_SCHEMA** on the table's parent schema - be an owner of the table.
+ */
+ public Monitor updateMonitor(UpdateMonitorRequest request) {
+ return impl.updateMonitor(request);
+ }
+
+ /** (Unimplemented) Update a refresh */
+ public Refresh updateRefresh(UpdateRefreshRequest request) {
+ return impl.updateRefresh(request);
+ }
+
+ public DataQualityService impl() {
+ return impl;
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataQualityImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataQualityImpl.java
new file mode 100755
index 000000000..7411d79f5
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataQualityImpl.java
@@ -0,0 +1,190 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.core.ApiClient;
+import com.databricks.sdk.core.DatabricksException;
+import com.databricks.sdk.core.http.Request;
+import com.databricks.sdk.support.Generated;
+import java.io.IOException;
+
+/** Package-local implementation of DataQuality */
+@Generated
+class DataQualityImpl implements DataQualityService {
+ private final ApiClient apiClient;
+
+ public DataQualityImpl(ApiClient apiClient) {
+ this.apiClient = apiClient;
+ }
+
+ @Override
+ public CancelRefreshResponse cancelRefresh(CancelRefreshRequest request) {
+ String path =
+ String.format(
+ "/api/data-quality/v1/monitors/%s/%s/refreshes/%s/cancel",
+ request.getObjectType(), request.getObjectId(), request.getRefreshId());
+ try {
+ Request req = new Request("POST", path, apiClient.serialize(request));
+ ApiClient.setQuery(req, request);
+ req.withHeader("Accept", "application/json");
+ req.withHeader("Content-Type", "application/json");
+ return apiClient.execute(req, CancelRefreshResponse.class);
+ } catch (IOException e) {
+ throw new DatabricksException("IO error: " + e.getMessage(), e);
+ }
+ }
+
+ @Override
+ public Monitor createMonitor(CreateMonitorRequest request) {
+ String path = "/api/data-quality/v1/monitors";
+ try {
+ Request req = new Request("POST", path, apiClient.serialize(request.getMonitor()));
+ ApiClient.setQuery(req, request);
+ req.withHeader("Accept", "application/json");
+ req.withHeader("Content-Type", "application/json");
+ return apiClient.execute(req, Monitor.class);
+ } catch (IOException e) {
+ throw new DatabricksException("IO error: " + e.getMessage(), e);
+ }
+ }
+
+ @Override
+ public Refresh createRefresh(CreateRefreshRequest request) {
+ String path =
+ String.format(
+ "/api/data-quality/v1/monitors/%s/%s/refreshes",
+ request.getObjectType(), request.getObjectId());
+ try {
+ Request req = new Request("POST", path, apiClient.serialize(request.getRefresh()));
+ ApiClient.setQuery(req, request);
+ req.withHeader("Accept", "application/json");
+ req.withHeader("Content-Type", "application/json");
+ return apiClient.execute(req, Refresh.class);
+ } catch (IOException e) {
+ throw new DatabricksException("IO error: " + e.getMessage(), e);
+ }
+ }
+
+ @Override
+ public void deleteMonitor(DeleteMonitorRequest request) {
+ String path =
+ String.format(
+ "/api/data-quality/v1/monitors/%s/%s", request.getObjectType(), request.getObjectId());
+ try {
+ Request req = new Request("DELETE", path);
+ ApiClient.setQuery(req, request);
+ req.withHeader("Accept", "application/json");
+ apiClient.execute(req, Void.class);
+ } catch (IOException e) {
+ throw new DatabricksException("IO error: " + e.getMessage(), e);
+ }
+ }
+
+ @Override
+ public void deleteRefresh(DeleteRefreshRequest request) {
+ String path =
+ String.format(
+ "/api/data-quality/v1/monitors/%s/%s/refreshes/%s",
+ request.getObjectType(), request.getObjectId(), request.getRefreshId());
+ try {
+ Request req = new Request("DELETE", path);
+ ApiClient.setQuery(req, request);
+ req.withHeader("Accept", "application/json");
+ apiClient.execute(req, Void.class);
+ } catch (IOException e) {
+ throw new DatabricksException("IO error: " + e.getMessage(), e);
+ }
+ }
+
+ @Override
+ public Monitor getMonitor(GetMonitorRequest request) {
+ String path =
+ String.format(
+ "/api/data-quality/v1/monitors/%s/%s", request.getObjectType(), request.getObjectId());
+ try {
+ Request req = new Request("GET", path);
+ ApiClient.setQuery(req, request);
+ req.withHeader("Accept", "application/json");
+ return apiClient.execute(req, Monitor.class);
+ } catch (IOException e) {
+ throw new DatabricksException("IO error: " + e.getMessage(), e);
+ }
+ }
+
+ @Override
+ public Refresh getRefresh(GetRefreshRequest request) {
+ String path =
+ String.format(
+ "/api/data-quality/v1/monitors/%s/%s/refreshes/%s",
+ request.getObjectType(), request.getObjectId(), request.getRefreshId());
+ try {
+ Request req = new Request("GET", path);
+ ApiClient.setQuery(req, request);
+ req.withHeader("Accept", "application/json");
+ return apiClient.execute(req, Refresh.class);
+ } catch (IOException e) {
+ throw new DatabricksException("IO error: " + e.getMessage(), e);
+ }
+ }
+
+ @Override
+ public ListMonitorResponse listMonitor(ListMonitorRequest request) {
+ String path = "/api/data-quality/v1/monitors";
+ try {
+ Request req = new Request("GET", path);
+ ApiClient.setQuery(req, request);
+ req.withHeader("Accept", "application/json");
+ return apiClient.execute(req, ListMonitorResponse.class);
+ } catch (IOException e) {
+ throw new DatabricksException("IO error: " + e.getMessage(), e);
+ }
+ }
+
+ @Override
+ public ListRefreshResponse listRefresh(ListRefreshRequest request) {
+ String path =
+ String.format(
+ "/api/data-quality/v1/monitors/%s/%s/refreshes",
+ request.getObjectType(), request.getObjectId());
+ try {
+ Request req = new Request("GET", path);
+ ApiClient.setQuery(req, request);
+ req.withHeader("Accept", "application/json");
+ return apiClient.execute(req, ListRefreshResponse.class);
+ } catch (IOException e) {
+ throw new DatabricksException("IO error: " + e.getMessage(), e);
+ }
+ }
+
+ @Override
+ public Monitor updateMonitor(UpdateMonitorRequest request) {
+ String path =
+ String.format(
+ "/api/data-quality/v1/monitors/%s/%s", request.getObjectType(), request.getObjectId());
+ try {
+ Request req = new Request("PATCH", path, apiClient.serialize(request.getMonitor()));
+ ApiClient.setQuery(req, request);
+ req.withHeader("Accept", "application/json");
+ req.withHeader("Content-Type", "application/json");
+ return apiClient.execute(req, Monitor.class);
+ } catch (IOException e) {
+ throw new DatabricksException("IO error: " + e.getMessage(), e);
+ }
+ }
+
+ @Override
+ public Refresh updateRefresh(UpdateRefreshRequest request) {
+ String path =
+ String.format(
+ "/api/data-quality/v1/monitors/%s/%s/refreshes/%s",
+ request.getObjectType(), request.getObjectId(), request.getRefreshId());
+ try {
+ Request req = new Request("PATCH", path, apiClient.serialize(request.getRefresh()));
+ ApiClient.setQuery(req, request);
+ req.withHeader("Accept", "application/json");
+ req.withHeader("Content-Type", "application/json");
+ return apiClient.execute(req, Refresh.class);
+ } catch (IOException e) {
+ throw new DatabricksException("IO error: " + e.getMessage(), e);
+ }
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataQualityService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataQualityService.java
new file mode 100755
index 000000000..1e5487768
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DataQualityService.java
@@ -0,0 +1,111 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+
+/**
+ * Manage the data quality of Unity Catalog objects (currently supports `schema` and `table`)
+ *
+ * <p>This is the high-level interface that contains generated methods.
+ *
+ * <p>Evolving: this interface is under development. Method signatures may change.
+ */
+@Generated
+public interface DataQualityService {
+ /**
+ * Cancels a data quality monitor refresh. Currently only supported for the `table` `object_type`.
+ */
+ CancelRefreshResponse cancelRefresh(CancelRefreshRequest cancelRefreshRequest);
+
+ /**
+ * Create a data quality monitor on a Unity Catalog object. The caller must provide either
+ * `anomaly_detection_config` for a schema monitor or `data_profiling_config` for a table monitor.
+ *
+ * <p>For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent
+ * catalog, have **USE_SCHEMA** on the table's parent schema, and have **SELECT** access on the
+ * table 2. have **USE_CATALOG** on the table's parent catalog, be an owner of the table's parent
+ * schema, and have **SELECT** access on the table. 3. have the following permissions: -
+ * **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the table's parent schema -
+ * be an owner of the table.
+ *
+ * <p>Workspace assets, such as the dashboard, will be created in the workspace where this call
+ * was made.
+ */
+ Monitor createMonitor(CreateMonitorRequest createMonitorRequest);
+
+ /**
+ * Creates a refresh. Currently only supported for the `table` `object_type`.
+ *
+ * <p>The caller must either: 1. be an owner of the table's parent catalog 2. have **USE_CATALOG**
+ * on the table's parent catalog and be an owner of the table's parent schema 3. have the
+ * following permissions: - **USE_CATALOG** on the table's parent catalog - **USE_SCHEMA** on the
+ * table's parent schema - be an owner of the table
+ */
+ Refresh createRefresh(CreateRefreshRequest createRefreshRequest);
+
+ /**
+ * Delete a data quality monitor on Unity Catalog object.
+ *
+ * <p>For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent
+ * catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+ * parent schema 3. have the following permissions: - **USE_CATALOG** on the table's parent
+ * catalog - **USE_SCHEMA** on the table's parent schema - be an owner of the table.
+ *
+ * <p>Note that the metric tables and dashboard will not be deleted as part of this call; those
+ * assets must be manually cleaned up (if desired).
+ */
+ void deleteMonitor(DeleteMonitorRequest deleteMonitorRequest);
+
+ /** (Unimplemented) Delete a refresh */
+ void deleteRefresh(DeleteRefreshRequest deleteRefreshRequest);
+
+ /**
+ * Read a data quality monitor on Unity Catalog object.
+ *
+ * <p>For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent
+ * catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+ * parent schema. 3. have the following permissions: - **USE_CATALOG** on the table's parent
+ * catalog - **USE_SCHEMA** on the table's parent schema - **SELECT** privilege on the table.
+ *
+ * <p>The returned information includes configuration values, as well as information on assets
+ * created by the monitor. Some information (e.g., dashboard) may be filtered out if the caller is
+ * in a different workspace than where the monitor was created.
+ */
+ Monitor getMonitor(GetMonitorRequest getMonitorRequest);
+
+ /**
+ * Get data quality monitor refresh.
+ *
+ * <p>For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent
+ * catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+ * parent schema 3. have the following permissions: - **USE_CATALOG** on the table's parent
+ * catalog - **USE_SCHEMA** on the table's parent schema - **SELECT** privilege on the table.
+ */
+ Refresh getRefresh(GetRefreshRequest getRefreshRequest);
+
+ /** (Unimplemented) List data quality monitors. */
+ ListMonitorResponse listMonitor(ListMonitorRequest listMonitorRequest);
+
+ /**
+ * List data quality monitor refreshes.
+ *
+ * <p>For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent
+ * catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+ * parent schema 3. have the following permissions: - **USE_CATALOG** on the table's parent
+ * catalog - **USE_SCHEMA** on the table's parent schema - **SELECT** privilege on the table.
+ */
+ ListRefreshResponse listRefresh(ListRefreshRequest listRefreshRequest);
+
+ /**
+ * Update a data quality monitor on Unity Catalog object.
+ *
+ * <p>For the `table` `object_type`, the caller must either: 1. be an owner of the table's parent
+ * catalog 2. have **USE_CATALOG** on the table's parent catalog and be an owner of the table's
+ * parent schema 3. have the following permissions: - **USE_CATALOG** on the table's parent
+ * catalog - **USE_SCHEMA** on the table's parent schema - be an owner of the table.
+ */
+ Monitor updateMonitor(UpdateMonitorRequest updateMonitorRequest);
+
+ /** (Unimplemented) Update a refresh */
+ Refresh updateRefresh(UpdateRefreshRequest updateRefreshRequest);
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DeleteMonitorRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DeleteMonitorRequest.java
new file mode 100755
index 000000000..38895f088
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DeleteMonitorRequest.java
@@ -0,0 +1,56 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import java.util.Objects;
+
+@Generated
+public class DeleteMonitorRequest {
+ /** The UUID of the request object. For example, schema id. */
+ @JsonIgnore private String objectId;
+
+ /** The type of the monitored object. Can be one of the following: schema or table. */
+ @JsonIgnore private String objectType;
+
+ public DeleteMonitorRequest setObjectId(String objectId) {
+ this.objectId = objectId;
+ return this;
+ }
+
+ public String getObjectId() {
+ return objectId;
+ }
+
+ public DeleteMonitorRequest setObjectType(String objectType) {
+ this.objectType = objectType;
+ return this;
+ }
+
+ public String getObjectType() {
+ return objectType;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ DeleteMonitorRequest that = (DeleteMonitorRequest) o;
+ return Objects.equals(objectId, that.objectId) && Objects.equals(objectType, that.objectType);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(objectId, objectType);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(DeleteMonitorRequest.class)
+ .add("objectId", objectId)
+ .add("objectType", objectType)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DeleteRefreshRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DeleteRefreshRequest.java
new file mode 100755
index 000000000..386879bb7
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/DeleteRefreshRequest.java
@@ -0,0 +1,71 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import java.util.Objects;
+
+@Generated
+public class DeleteRefreshRequest {
+ /** The UUID of the request object. For example, schema id. */
+ @JsonIgnore private String objectId;
+
+ /** The type of the monitored object. Can be one of the following: schema or table. */
+ @JsonIgnore private String objectType;
+
+ /** Unique id of the refresh operation. */
+ @JsonIgnore private Long refreshId;
+
+ public DeleteRefreshRequest setObjectId(String objectId) {
+ this.objectId = objectId;
+ return this;
+ }
+
+ public String getObjectId() {
+ return objectId;
+ }
+
+ public DeleteRefreshRequest setObjectType(String objectType) {
+ this.objectType = objectType;
+ return this;
+ }
+
+ public String getObjectType() {
+ return objectType;
+ }
+
+ public DeleteRefreshRequest setRefreshId(Long refreshId) {
+ this.refreshId = refreshId;
+ return this;
+ }
+
+ public Long getRefreshId() {
+ return refreshId;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ DeleteRefreshRequest that = (DeleteRefreshRequest) o;
+ return Objects.equals(objectId, that.objectId)
+ && Objects.equals(objectType, that.objectType)
+ && Objects.equals(refreshId, that.refreshId);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(objectId, objectType, refreshId);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(DeleteRefreshRequest.class)
+ .add("objectId", objectId)
+ .add("objectType", objectType)
+ .add("refreshId", refreshId)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/GetMonitorRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/GetMonitorRequest.java
new file mode 100755
index 000000000..5d18b90d7
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/GetMonitorRequest.java
@@ -0,0 +1,56 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import java.util.Objects;
+
+@Generated
+public class GetMonitorRequest {
+ /** The UUID of the request object. For example, schema id. */
+ @JsonIgnore private String objectId;
+
+ /** The type of the monitored object. Can be one of the following: schema or table. */
+ @JsonIgnore private String objectType;
+
+ public GetMonitorRequest setObjectId(String objectId) {
+ this.objectId = objectId;
+ return this;
+ }
+
+ public String getObjectId() {
+ return objectId;
+ }
+
+ public GetMonitorRequest setObjectType(String objectType) {
+ this.objectType = objectType;
+ return this;
+ }
+
+ public String getObjectType() {
+ return objectType;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ GetMonitorRequest that = (GetMonitorRequest) o;
+ return Objects.equals(objectId, that.objectId) && Objects.equals(objectType, that.objectType);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(objectId, objectType);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(GetMonitorRequest.class)
+ .add("objectId", objectId)
+ .add("objectType", objectType)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/GetRefreshRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/GetRefreshRequest.java
new file mode 100755
index 000000000..0e4950e81
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/GetRefreshRequest.java
@@ -0,0 +1,71 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import java.util.Objects;
+
+@Generated
+public class GetRefreshRequest {
+ /** The UUID of the request object. For example, schema id. */
+ @JsonIgnore private String objectId;
+
+ /** The type of the monitored object. Can be one of the following: schema or table. */
+ @JsonIgnore private String objectType;
+
+ /** Unique id of the refresh operation. */
+ @JsonIgnore private Long refreshId;
+
+ public GetRefreshRequest setObjectId(String objectId) {
+ this.objectId = objectId;
+ return this;
+ }
+
+ public String getObjectId() {
+ return objectId;
+ }
+
+ public GetRefreshRequest setObjectType(String objectType) {
+ this.objectType = objectType;
+ return this;
+ }
+
+ public String getObjectType() {
+ return objectType;
+ }
+
+ public GetRefreshRequest setRefreshId(Long refreshId) {
+ this.refreshId = refreshId;
+ return this;
+ }
+
+ public Long getRefreshId() {
+ return refreshId;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ GetRefreshRequest that = (GetRefreshRequest) o;
+ return Objects.equals(objectId, that.objectId)
+ && Objects.equals(objectType, that.objectType)
+ && Objects.equals(refreshId, that.refreshId);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(objectId, objectType, refreshId);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(GetRefreshRequest.class)
+ .add("objectId", objectId)
+ .add("objectType", objectType)
+ .add("refreshId", refreshId)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/InferenceLogConfig.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/InferenceLogConfig.java
new file mode 100755
index 000000000..0411b284a
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/InferenceLogConfig.java
@@ -0,0 +1,145 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Collection;
+import java.util.Objects;
+
+/** Inference log configuration. */
+@Generated
+public class InferenceLogConfig {
+ /**
+ * List of granularities to use when aggregating data into time windows based on their timestamp.
+ */
+ @JsonProperty("granularities")
+ private Collection<String> granularities;
+
+ /** Column for the label. */
+ @JsonProperty("label_column")
+ private String labelColumn;
+
+ /** Column for the model identifier. */
+ @JsonProperty("model_id_column")
+ private String modelIdColumn;
+
+ /** Column for the prediction. */
+ @JsonProperty("prediction_column")
+ private String predictionColumn;
+
+ /** Column for prediction probabilities */
+ @JsonProperty("prediction_probability_column")
+ private String predictionProbabilityColumn;
+
+ /** Problem type the model aims to solve. */
+ @JsonProperty("problem_type")
+ private InferenceProblemType problemType;
+
+ /** Column for the timestamp. */
+ @JsonProperty("timestamp_column")
+ private String timestampColumn;
+
+ public InferenceLogConfig setGranularities(Collection<String> granularities) {
+ this.granularities = granularities;
+ return this;
+ }
+
+ public Collection<String> getGranularities() {
+ return granularities;
+ }
+
+ public InferenceLogConfig setLabelColumn(String labelColumn) {
+ this.labelColumn = labelColumn;
+ return this;
+ }
+
+ public String getLabelColumn() {
+ return labelColumn;
+ }
+
+ public InferenceLogConfig setModelIdColumn(String modelIdColumn) {
+ this.modelIdColumn = modelIdColumn;
+ return this;
+ }
+
+ public String getModelIdColumn() {
+ return modelIdColumn;
+ }
+
+ public InferenceLogConfig setPredictionColumn(String predictionColumn) {
+ this.predictionColumn = predictionColumn;
+ return this;
+ }
+
+ public String getPredictionColumn() {
+ return predictionColumn;
+ }
+
+ public InferenceLogConfig setPredictionProbabilityColumn(String predictionProbabilityColumn) {
+ this.predictionProbabilityColumn = predictionProbabilityColumn;
+ return this;
+ }
+
+ public String getPredictionProbabilityColumn() {
+ return predictionProbabilityColumn;
+ }
+
+ public InferenceLogConfig setProblemType(InferenceProblemType problemType) {
+ this.problemType = problemType;
+ return this;
+ }
+
+ public InferenceProblemType getProblemType() {
+ return problemType;
+ }
+
+ public InferenceLogConfig setTimestampColumn(String timestampColumn) {
+ this.timestampColumn = timestampColumn;
+ return this;
+ }
+
+ public String getTimestampColumn() {
+ return timestampColumn;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ InferenceLogConfig that = (InferenceLogConfig) o;
+ return Objects.equals(granularities, that.granularities)
+ && Objects.equals(labelColumn, that.labelColumn)
+ && Objects.equals(modelIdColumn, that.modelIdColumn)
+ && Objects.equals(predictionColumn, that.predictionColumn)
+ && Objects.equals(predictionProbabilityColumn, that.predictionProbabilityColumn)
+ && Objects.equals(problemType, that.problemType)
+ && Objects.equals(timestampColumn, that.timestampColumn);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(
+ granularities,
+ labelColumn,
+ modelIdColumn,
+ predictionColumn,
+ predictionProbabilityColumn,
+ problemType,
+ timestampColumn);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(InferenceLogConfig.class)
+ .add("granularities", granularities)
+ .add("labelColumn", labelColumn)
+ .add("modelIdColumn", modelIdColumn)
+ .add("predictionColumn", predictionColumn)
+ .add("predictionProbabilityColumn", predictionProbabilityColumn)
+ .add("problemType", problemType)
+ .add("timestampColumn", timestampColumn)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/InferenceProblemType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/InferenceProblemType.java
new file mode 100755
index 000000000..3adad7d38
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/InferenceProblemType.java
@@ -0,0 +1,12 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+
+/** Inference problem type the model aims to solve. */
+@Generated
+public enum InferenceProblemType {
+ INFERENCE_PROBLEM_TYPE_CLASSIFICATION,
+ INFERENCE_PROBLEM_TYPE_REGRESSION,
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/ListMonitorRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/ListMonitorRequest.java
new file mode 100755
index 000000000..5a7cc3b3a
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/ListMonitorRequest.java
@@ -0,0 +1,61 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.QueryParam;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import java.util.Objects;
+
+@Generated
+public class ListMonitorRequest {
+ /** */
+ @JsonIgnore
+ @QueryParam("page_size")
+ private Long pageSize;
+
+ /** */
+ @JsonIgnore
+ @QueryParam("page_token")
+ private String pageToken;
+
+ public ListMonitorRequest setPageSize(Long pageSize) {
+ this.pageSize = pageSize;
+ return this;
+ }
+
+ public Long getPageSize() {
+ return pageSize;
+ }
+
+ public ListMonitorRequest setPageToken(String pageToken) {
+ this.pageToken = pageToken;
+ return this;
+ }
+
+ public String getPageToken() {
+ return pageToken;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ ListMonitorRequest that = (ListMonitorRequest) o;
+ return Objects.equals(pageSize, that.pageSize) && Objects.equals(pageToken, that.pageToken);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(pageSize, pageToken);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(ListMonitorRequest.class)
+ .add("pageSize", pageSize)
+ .add("pageToken", pageToken)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/ListMonitorResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/ListMonitorResponse.java
new file mode 100755
index 000000000..ad6f2650b
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/ListMonitorResponse.java
@@ -0,0 +1,61 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Collection;
+import java.util.Objects;
+
+/** Response for listing Monitors. */
+@Generated
+public class ListMonitorResponse {
+ /** */
+ @JsonProperty("monitors")
+ private Collection<Monitor> monitors;
+
+ /** */
+ @JsonProperty("next_page_token")
+ private String nextPageToken;
+
+ public ListMonitorResponse setMonitors(Collection<Monitor> monitors) {
+ this.monitors = monitors;
+ return this;
+ }
+
+ public Collection<Monitor> getMonitors() {
+ return monitors;
+ }
+
+ public ListMonitorResponse setNextPageToken(String nextPageToken) {
+ this.nextPageToken = nextPageToken;
+ return this;
+ }
+
+ public String getNextPageToken() {
+ return nextPageToken;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ ListMonitorResponse that = (ListMonitorResponse) o;
+ return Objects.equals(monitors, that.monitors)
+ && Objects.equals(nextPageToken, that.nextPageToken);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(monitors, nextPageToken);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(ListMonitorResponse.class)
+ .add("monitors", monitors)
+ .add("nextPageToken", nextPageToken)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/ListRefreshRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/ListRefreshRequest.java
new file mode 100755
index 000000000..2c0c4fc1f
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/ListRefreshRequest.java
@@ -0,0 +1,90 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.QueryParam;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import java.util.Objects;
+
+@Generated
+public class ListRefreshRequest {
+ /** The UUID of the request object. For example, schema id. */
+ @JsonIgnore private String objectId;
+
+ /** The type of the monitored object. Can be one of the following: schema or table. */
+ @JsonIgnore private String objectType;
+
+ /** */
+ @JsonIgnore
+ @QueryParam("page_size")
+ private Long pageSize;
+
+ /** */
+ @JsonIgnore
+ @QueryParam("page_token")
+ private String pageToken;
+
+ public ListRefreshRequest setObjectId(String objectId) {
+ this.objectId = objectId;
+ return this;
+ }
+
+ public String getObjectId() {
+ return objectId;
+ }
+
+ public ListRefreshRequest setObjectType(String objectType) {
+ this.objectType = objectType;
+ return this;
+ }
+
+ public String getObjectType() {
+ return objectType;
+ }
+
+ public ListRefreshRequest setPageSize(Long pageSize) {
+ this.pageSize = pageSize;
+ return this;
+ }
+
+ public Long getPageSize() {
+ return pageSize;
+ }
+
+ public ListRefreshRequest setPageToken(String pageToken) {
+ this.pageToken = pageToken;
+ return this;
+ }
+
+ public String getPageToken() {
+ return pageToken;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ ListRefreshRequest that = (ListRefreshRequest) o;
+ return Objects.equals(objectId, that.objectId)
+ && Objects.equals(objectType, that.objectType)
+ && Objects.equals(pageSize, that.pageSize)
+ && Objects.equals(pageToken, that.pageToken);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(objectId, objectType, pageSize, pageToken);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(ListRefreshRequest.class)
+ .add("objectId", objectId)
+ .add("objectType", objectType)
+ .add("pageSize", pageSize)
+ .add("pageToken", pageToken)
+ .toString();
+ }
+}
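
Note the annotation split: `objectId` and `objectType` carry `@JsonIgnore` with no `@QueryParam`, so they bind into the request path, while the paging fields go on the query string. A sketch addressing one table's refresh history (the UUID is a placeholder):

```java
import com.databricks.sdk.service.dataquality.ListRefreshRequest;

public class ListRefreshRequestSketch {
  public static void main(String[] args) {
    ListRefreshRequest request =
        new ListRefreshRequest()
            .setObjectType("table") // path parameter: type of the monitored object
            .setObjectId("22222222-2222-2222-2222-222222222222") // placeholder table UUID
            .setPageSize(25L); // query parameter
    System.out.println(request);
  }
}
```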
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/ListRefreshResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/ListRefreshResponse.java
new file mode 100755
index 000000000..d05dce54b
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/ListRefreshResponse.java
@@ -0,0 +1,61 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Collection;
+import java.util.Objects;
+
+/** Response for listing refreshes. */
+@Generated
+public class ListRefreshResponse {
+ /** */
+ @JsonProperty("next_page_token")
+ private String nextPageToken;
+
+ /** */
+ @JsonProperty("refreshes")
+ private Collection<Refresh> refreshes;
+
+ public ListRefreshResponse setNextPageToken(String nextPageToken) {
+ this.nextPageToken = nextPageToken;
+ return this;
+ }
+
+ public String getNextPageToken() {
+ return nextPageToken;
+ }
+
+ public ListRefreshResponse setRefreshes(Collection<Refresh> refreshes) {
+ this.refreshes = refreshes;
+ return this;
+ }
+
+ public Collection<Refresh> getRefreshes() {
+ return refreshes;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ ListRefreshResponse that = (ListRefreshResponse) o;
+ return Objects.equals(nextPageToken, that.nextPageToken)
+ && Objects.equals(refreshes, that.refreshes);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(nextPageToken, refreshes);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(ListRefreshResponse.class)
+ .add("nextPageToken", nextPageToken)
+ .add("refreshes", refreshes)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/Monitor.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/Monitor.java
new file mode 100755
index 000000000..22146c211
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/Monitor.java
@@ -0,0 +1,90 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+/** Monitor for the data quality of unity catalog entities such as schema or table. */
+@Generated
+public class Monitor {
+ /** Anomaly Detection Configuration, applicable to `schema` object types. */
+ @JsonProperty("anomaly_detection_config")
+ private AnomalyDetectionConfig anomalyDetectionConfig;
+
/** Data Profiling Configuration, applicable to `table` object types. */
+ @JsonProperty("data_profiling_config")
+ private DataProfilingConfig dataProfilingConfig;
+
+ /** The UUID of the request object. For example, schema id. */
+ @JsonProperty("object_id")
+ private String objectId;
+
+ /** The type of the monitored object. Can be one of the following: schema or table. */
+ @JsonProperty("object_type")
+ private String objectType;
+
+ public Monitor setAnomalyDetectionConfig(AnomalyDetectionConfig anomalyDetectionConfig) {
+ this.anomalyDetectionConfig = anomalyDetectionConfig;
+ return this;
+ }
+
+ public AnomalyDetectionConfig getAnomalyDetectionConfig() {
+ return anomalyDetectionConfig;
+ }
+
+ public Monitor setDataProfilingConfig(DataProfilingConfig dataProfilingConfig) {
+ this.dataProfilingConfig = dataProfilingConfig;
+ return this;
+ }
+
+ public DataProfilingConfig getDataProfilingConfig() {
+ return dataProfilingConfig;
+ }
+
+ public Monitor setObjectId(String objectId) {
+ this.objectId = objectId;
+ return this;
+ }
+
+ public String getObjectId() {
+ return objectId;
+ }
+
+ public Monitor setObjectType(String objectType) {
+ this.objectType = objectType;
+ return this;
+ }
+
+ public String getObjectType() {
+ return objectType;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ Monitor that = (Monitor) o;
+ return Objects.equals(anomalyDetectionConfig, that.anomalyDetectionConfig)
+ && Objects.equals(dataProfilingConfig, that.dataProfilingConfig)
+ && Objects.equals(objectId, that.objectId)
+ && Objects.equals(objectType, that.objectType);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(anomalyDetectionConfig, dataProfilingConfig, objectId, objectType);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(Monitor.class)
+ .add("anomalyDetectionConfig", anomalyDetectionConfig)
+ .add("dataProfilingConfig", dataProfilingConfig)
+ .add("objectId", objectId)
+ .add("objectType", objectType)
+ .toString();
+ }
+}
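
Per the field docs, the two configs pair with the two object types: `anomaly_detection_config` with `schema`, `data_profiling_config` with `table`. A sketch of a schema-level monitor, assuming `AnomalyDetectionConfig` follows the usual no-argument-constructor pattern of these generated classes (its own fields are defined elsewhere in this changeset and left unset here):

```java
import com.databricks.sdk.service.dataquality.AnomalyDetectionConfig;
import com.databricks.sdk.service.dataquality.Monitor;

public class MonitorSketch {
  public static void main(String[] args) {
    Monitor schemaMonitor =
        new Monitor()
            .setObjectType("schema")
            .setObjectId("00000000-0000-0000-0000-000000000000") // placeholder schema UUID
            // Applicable to `schema` object types per the javadoc above.
            .setAnomalyDetectionConfig(new AnomalyDetectionConfig());
    System.out.println(schemaMonitor);
  }
}
```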
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/NotificationDestination.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/NotificationDestination.java
new file mode 100755
index 000000000..5a5a920aa
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/NotificationDestination.java
@@ -0,0 +1,49 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Collection;
+import java.util.Objects;
+
+/** Destination of the data quality monitoring notification. */
+@Generated
+public class NotificationDestination {
+ /**
+ * The list of email addresses to send the notification to. A maximum of 5 email addresses is
+ * supported.
+ */
+ @JsonProperty("email_addresses")
+ private Collection<String> emailAddresses;
+
+ public NotificationDestination setEmailAddresses(Collection<String> emailAddresses) {
+ this.emailAddresses = emailAddresses;
+ return this;
+ }
+
+ public Collection<String> getEmailAddresses() {
+ return emailAddresses;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ NotificationDestination that = (NotificationDestination) o;
+ return Objects.equals(emailAddresses, that.emailAddresses);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(emailAddresses);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(NotificationDestination.class)
+ .add("emailAddresses", emailAddresses)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/NotificationSettings.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/NotificationSettings.java
new file mode 100755
index 000000000..6f3b950b5
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/NotificationSettings.java
@@ -0,0 +1,43 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+/** Settings for sending notifications on the data quality monitoring. */
+@Generated
+public class NotificationSettings {
+ /** Destinations to send notifications on failure/timeout. */
+ @JsonProperty("on_failure")
+ private NotificationDestination onFailure;
+
+ public NotificationSettings setOnFailure(NotificationDestination onFailure) {
+ this.onFailure = onFailure;
+ return this;
+ }
+
+ public NotificationDestination getOnFailure() {
+ return onFailure;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ NotificationSettings that = (NotificationSettings) o;
+ return Objects.equals(onFailure, that.onFailure);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(onFailure);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(NotificationSettings.class).add("onFailure", onFailure).toString();
+ }
+}
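
The two notification classes compose: `NotificationSettings.on_failure` holds a `NotificationDestination`, which in turn holds up to five email addresses. A sketch wiring them together (the address is a placeholder):

```java
import com.databricks.sdk.service.dataquality.NotificationDestination;
import com.databricks.sdk.service.dataquality.NotificationSettings;
import java.util.Arrays;

public class NotificationSketch {
  public static void main(String[] args) {
    NotificationSettings settings =
        new NotificationSettings()
            .setOnFailure(
                new NotificationDestination()
                    // A maximum of 5 addresses is supported per the field's javadoc.
                    .setEmailAddresses(Arrays.asList("data-team@example.com")));
    System.out.println(settings);
  }
}
```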
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/Refresh.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/Refresh.java
new file mode 100755
index 000000000..3c612bd64
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/Refresh.java
@@ -0,0 +1,154 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+/** The Refresh object gives information on a refresh of the data quality monitoring pipeline. */
+@Generated
+public class Refresh {
+ /** Time when the refresh ended (milliseconds since 1/1/1970 UTC). */
+ @JsonProperty("end_time_ms")
+ private Long endTimeMs;
+
+ /**
+ * An optional message to give insight into the current state of the refresh (e.g. FAILURE
+ * messages).
+ */
+ @JsonProperty("message")
+ private String message;
+
+ /** The UUID of the request object. For example, table id. */
+ @JsonProperty("object_id")
+ private String objectId;
+
+ /** The type of the monitored object. Can be one of the following: table. */
+ @JsonProperty("object_type")
+ private String objectType;
+
+ /** Unique id of the refresh operation. */
+ @JsonProperty("refresh_id")
+ private Long refreshId;
+
+ /** Time when the refresh started (milliseconds since 1/1/1970 UTC). */
+ @JsonProperty("start_time_ms")
+ private Long startTimeMs;
+
+ /** The current state of the refresh. */
+ @JsonProperty("state")
+ private RefreshState state;
+
+ /** What triggered the refresh. */
+ @JsonProperty("trigger")
+ private RefreshTrigger trigger;
+
+ public Refresh setEndTimeMs(Long endTimeMs) {
+ this.endTimeMs = endTimeMs;
+ return this;
+ }
+
+ public Long getEndTimeMs() {
+ return endTimeMs;
+ }
+
+ public Refresh setMessage(String message) {
+ this.message = message;
+ return this;
+ }
+
+ public String getMessage() {
+ return message;
+ }
+
+ public Refresh setObjectId(String objectId) {
+ this.objectId = objectId;
+ return this;
+ }
+
+ public String getObjectId() {
+ return objectId;
+ }
+
+ public Refresh setObjectType(String objectType) {
+ this.objectType = objectType;
+ return this;
+ }
+
+ public String getObjectType() {
+ return objectType;
+ }
+
+ public Refresh setRefreshId(Long refreshId) {
+ this.refreshId = refreshId;
+ return this;
+ }
+
+ public Long getRefreshId() {
+ return refreshId;
+ }
+
+ public Refresh setStartTimeMs(Long startTimeMs) {
+ this.startTimeMs = startTimeMs;
+ return this;
+ }
+
+ public Long getStartTimeMs() {
+ return startTimeMs;
+ }
+
+ public Refresh setState(RefreshState state) {
+ this.state = state;
+ return this;
+ }
+
+ public RefreshState getState() {
+ return state;
+ }
+
+ public Refresh setTrigger(RefreshTrigger trigger) {
+ this.trigger = trigger;
+ return this;
+ }
+
+ public RefreshTrigger getTrigger() {
+ return trigger;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ Refresh that = (Refresh) o;
+ return Objects.equals(endTimeMs, that.endTimeMs)
+ && Objects.equals(message, that.message)
+ && Objects.equals(objectId, that.objectId)
+ && Objects.equals(objectType, that.objectType)
+ && Objects.equals(refreshId, that.refreshId)
+ && Objects.equals(startTimeMs, that.startTimeMs)
+ && Objects.equals(state, that.state)
+ && Objects.equals(trigger, that.trigger);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(
+ endTimeMs, message, objectId, objectType, refreshId, startTimeMs, state, trigger);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(Refresh.class)
+ .add("endTimeMs", endTimeMs)
+ .add("message", message)
+ .add("objectId", objectId)
+ .add("objectType", objectType)
+ .add("refreshId", refreshId)
+ .add("startTimeMs", startTimeMs)
+ .add("state", state)
+ .add("trigger", trigger)
+ .toString();
+ }
+}
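
Because the timestamps are epoch milliseconds, the duration of a finished refresh is a plain subtraction, and `state` tells you whether the refresh has actually reached a terminal state. A small helper sketch, assuming terminal refreshes carry both timestamps (the hunk does not state nullability guarantees):

```java
import com.databricks.sdk.service.dataquality.Refresh;
import com.databricks.sdk.service.dataquality.RefreshState;
import java.util.EnumSet;
import java.util.Set;

public class RefreshDuration {
  private static final Set<RefreshState> TERMINAL =
      EnumSet.of(
          RefreshState.MONITOR_REFRESH_STATE_SUCCESS,
          RefreshState.MONITOR_REFRESH_STATE_FAILED,
          RefreshState.MONITOR_REFRESH_STATE_CANCELED);

  // Wall-clock duration in ms for a terminal refresh, or -1 while it is still in flight.
  static long durationMs(Refresh refresh) {
    if (refresh.getState() != null && TERMINAL.contains(refresh.getState())) {
      return refresh.getEndTimeMs() - refresh.getStartTimeMs(); // assumes both are set
    }
    return -1L;
  }
}
```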
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/RefreshState.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/RefreshState.java
new file mode 100755
index 000000000..d69055c0e
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/RefreshState.java
@@ -0,0 +1,16 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+
+/** The state of the refresh. */
+@Generated
+public enum RefreshState {
+ MONITOR_REFRESH_STATE_CANCELED,
+ MONITOR_REFRESH_STATE_FAILED,
+ MONITOR_REFRESH_STATE_PENDING,
+ MONITOR_REFRESH_STATE_RUNNING,
+ MONITOR_REFRESH_STATE_SUCCESS,
+ MONITOR_REFRESH_STATE_UNKNOWN,
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/RefreshTrigger.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/RefreshTrigger.java
new file mode 100755
index 000000000..f40549424
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/RefreshTrigger.java
@@ -0,0 +1,14 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+
+/** The trigger of the refresh. */
+@Generated
+public enum RefreshTrigger {
+ MONITOR_REFRESH_TRIGGER_DATA_CHANGE,
+ MONITOR_REFRESH_TRIGGER_MANUAL,
+ MONITOR_REFRESH_TRIGGER_SCHEDULE,
+ MONITOR_REFRESH_TRIGGER_UNKNOWN,
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/SnapshotConfig.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/SnapshotConfig.java
new file mode 100755
index 000000000..de4158cf0
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/SnapshotConfig.java
@@ -0,0 +1,29 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import java.util.Objects;
+
+/** Snapshot analysis configuration. */
+@Generated
+public class SnapshotConfig {
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash();
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(SnapshotConfig.class).toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/TimeSeriesConfig.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/TimeSeriesConfig.java
new file mode 100755
index 000000000..eb7d0402e
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/TimeSeriesConfig.java
@@ -0,0 +1,63 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Collection;
+import java.util.Objects;
+
+/** Time series analysis configuration. */
+@Generated
+public class TimeSeriesConfig {
+ /**
+ * List of granularities to use when aggregating data into time windows based on their timestamp.
+ */
+ @JsonProperty("granularities")
+ private Collection<String> granularities;
+
+ /** Column for the timestamp. */
+ @JsonProperty("timestamp_column")
+ private String timestampColumn;
+
+ public TimeSeriesConfig setGranularities(Collection<String> granularities) {
+ this.granularities = granularities;
+ return this;
+ }
+
+ public Collection<String> getGranularities() {
+ return granularities;
+ }
+
+ public TimeSeriesConfig setTimestampColumn(String timestampColumn) {
+ this.timestampColumn = timestampColumn;
+ return this;
+ }
+
+ public String getTimestampColumn() {
+ return timestampColumn;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ TimeSeriesConfig that = (TimeSeriesConfig) o;
+ return Objects.equals(granularities, that.granularities)
+ && Objects.equals(timestampColumn, that.timestampColumn);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(granularities, timestampColumn);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(TimeSeriesConfig.class)
+ .add("granularities", granularities)
+ .add("timestampColumn", timestampColumn)
+ .toString();
+ }
+}
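
Time-series profiling needs the timestamp column plus the aggregation windows. A sketch; the granularity strings are illustrative, since this hunk does not enumerate the accepted values:

```java
import com.databricks.sdk.service.dataquality.TimeSeriesConfig;
import java.util.Arrays;

public class TimeSeriesConfigSketch {
  public static void main(String[] args) {
    TimeSeriesConfig config =
        new TimeSeriesConfig()
            .setTimestampColumn("event_ts") // placeholder column name
            .setGranularities(Arrays.asList("1 hour", "1 day")); // illustrative values
    System.out.println(config);
  }
}
```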
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/UpdateMonitorRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/UpdateMonitorRequest.java
new file mode 100755
index 000000000..935381cea
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/UpdateMonitorRequest.java
@@ -0,0 +1,90 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.QueryParam;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class UpdateMonitorRequest {
+ /** The monitor to update. */
+ @JsonProperty("monitor")
+ private Monitor monitor;
+
+ /** The UUID of the request object. For example, schema id. */
+ @JsonIgnore private String objectId;
+
+ /** The type of the monitored object. Can be one of the following: schema or table. */
+ @JsonIgnore private String objectType;
+
+ /** The field mask to specify which fields to update. */
+ @JsonIgnore
+ @QueryParam("update_mask")
+ private String updateMask;
+
+ public UpdateMonitorRequest setMonitor(Monitor monitor) {
+ this.monitor = monitor;
+ return this;
+ }
+
+ public Monitor getMonitor() {
+ return monitor;
+ }
+
+ public UpdateMonitorRequest setObjectId(String objectId) {
+ this.objectId = objectId;
+ return this;
+ }
+
+ public String getObjectId() {
+ return objectId;
+ }
+
+ public UpdateMonitorRequest setObjectType(String objectType) {
+ this.objectType = objectType;
+ return this;
+ }
+
+ public String getObjectType() {
+ return objectType;
+ }
+
+ public UpdateMonitorRequest setUpdateMask(String updateMask) {
+ this.updateMask = updateMask;
+ return this;
+ }
+
+ public String getUpdateMask() {
+ return updateMask;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ UpdateMonitorRequest that = (UpdateMonitorRequest) o;
+ return Objects.equals(monitor, that.monitor)
+ && Objects.equals(objectId, that.objectId)
+ && Objects.equals(objectType, that.objectType)
+ && Objects.equals(updateMask, that.updateMask);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(monitor, objectId, objectType, updateMask);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(UpdateMonitorRequest.class)
+ .add("monitor", monitor)
+ .add("objectId", objectId)
+ .add("objectType", objectType)
+ .add("updateMask", updateMask)
+ .toString();
+ }
+}
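
Updates are partial: `update_mask` names the fields to overwrite, and `object_type`/`object_id` address the target outside the JSON body (`@JsonIgnore`). A sketch that replaces only the profiling config, assuming `DataProfilingConfig` follows the same no-argument-constructor pattern (its fields are left unset here):

```java
import com.databricks.sdk.service.dataquality.DataProfilingConfig;
import com.databricks.sdk.service.dataquality.Monitor;
import com.databricks.sdk.service.dataquality.UpdateMonitorRequest;

public class UpdateMonitorSketch {
  public static void main(String[] args) {
    UpdateMonitorRequest request =
        new UpdateMonitorRequest()
            .setObjectType("table")
            .setObjectId("11111111-1111-1111-1111-111111111111") // placeholder table UUID
            // Only fields named in the mask are overwritten server-side.
            .setUpdateMask("data_profiling_config")
            .setMonitor(new Monitor().setDataProfilingConfig(new DataProfilingConfig()));
    System.out.println(request);
  }
}
```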
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/UpdateRefreshRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/UpdateRefreshRequest.java
new file mode 100755
index 000000000..9522428bd
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dataquality/UpdateRefreshRequest.java
@@ -0,0 +1,104 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.dataquality;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.QueryParam;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class UpdateRefreshRequest {
+ /** The UUID of the request object. For example, schema id. */
+ @JsonIgnore private String objectId;
+
+ /** The type of the monitored object. Can be one of the following: schema or table. */
+ @JsonIgnore private String objectType;
+
+ /** The refresh to update. */
+ @JsonProperty("refresh")
+ private Refresh refresh;
+
+ /** Unique id of the refresh operation. */
+ @JsonIgnore private Long refreshId;
+
+ /** The field mask to specify which fields to update. */
+ @JsonIgnore
+ @QueryParam("update_mask")
+ private String updateMask;
+
+ public UpdateRefreshRequest setObjectId(String objectId) {
+ this.objectId = objectId;
+ return this;
+ }
+
+ public String getObjectId() {
+ return objectId;
+ }
+
+ public UpdateRefreshRequest setObjectType(String objectType) {
+ this.objectType = objectType;
+ return this;
+ }
+
+ public String getObjectType() {
+ return objectType;
+ }
+
+ public UpdateRefreshRequest setRefresh(Refresh refresh) {
+ this.refresh = refresh;
+ return this;
+ }
+
+ public Refresh getRefresh() {
+ return refresh;
+ }
+
+ public UpdateRefreshRequest setRefreshId(Long refreshId) {
+ this.refreshId = refreshId;
+ return this;
+ }
+
+ public Long getRefreshId() {
+ return refreshId;
+ }
+
+ public UpdateRefreshRequest setUpdateMask(String updateMask) {
+ this.updateMask = updateMask;
+ return this;
+ }
+
+ public String getUpdateMask() {
+ return updateMask;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ UpdateRefreshRequest that = (UpdateRefreshRequest) o;
+ return Objects.equals(objectId, that.objectId)
+ && Objects.equals(objectType, that.objectType)
+ && Objects.equals(refresh, that.refresh)
+ && Objects.equals(refreshId, that.refreshId)
+ && Objects.equals(updateMask, that.updateMask);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(objectId, objectType, refresh, refreshId, updateMask);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(UpdateRefreshRequest.class)
+ .add("objectId", objectId)
+ .add("objectType", objectType)
+ .add("refresh", refresh)
+ .add("refreshId", refreshId)
+ .add("updateMask", updateMask)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/GetPermissionLevelsRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/GetPermissionLevelsRequest.java
index 5aacd813f..8ae58f6a5 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/GetPermissionLevelsRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/GetPermissionLevelsRequest.java
@@ -15,7 +15,7 @@ public class GetPermissionLevelsRequest {
/**
* The type of the request object. Can be one of the following: alerts, alertsv2, authorization,
* clusters, cluster-policies, dashboards, dbsql-dashboards, directories, experiments, files,
- * instance-pools, jobs, notebooks, pipelines, queries, registered-models, repos,
+ * genie, instance-pools, jobs, notebooks, pipelines, queries, registered-models, repos,
* serving-endpoints, or warehouses.
*/
@JsonIgnore private String requestObjectType;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/GetPermissionRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/GetPermissionRequest.java
index 9ee5386f6..801a423e4 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/GetPermissionRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/GetPermissionRequest.java
@@ -15,7 +15,7 @@ public class GetPermissionRequest {
/**
* The type of the request object. Can be one of the following: alerts, alertsv2, authorization,
* clusters, cluster-policies, dashboards, dbsql-dashboards, directories, experiments, files,
- * instance-pools, jobs, notebooks, pipelines, queries, registered-models, repos,
+ * genie, instance-pools, jobs, notebooks, pipelines, queries, registered-models, repos,
* serving-endpoints, or warehouses.
*/
@JsonIgnore private String requestObjectType;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/SetObjectPermissions.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/SetObjectPermissions.java
index 188f7fda0..25ba32997 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/SetObjectPermissions.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/SetObjectPermissions.java
@@ -21,7 +21,7 @@ public class SetObjectPermissions {
/**
* The type of the request object. Can be one of the following: alerts, alertsv2, authorization,
* clusters, cluster-policies, dashboards, dbsql-dashboards, directories, experiments, files,
- * instance-pools, jobs, notebooks, pipelines, queries, registered-models, repos,
+ * genie, instance-pools, jobs, notebooks, pipelines, queries, registered-models, repos,
* serving-endpoints, or warehouses.
*/
@JsonIgnore private String requestObjectType;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/UpdateObjectPermissions.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/UpdateObjectPermissions.java
index 442fbf40d..b7ea0195f 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/UpdateObjectPermissions.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/iam/UpdateObjectPermissions.java
@@ -21,7 +21,7 @@ public class UpdateObjectPermissions {
/**
* The type of the request object. Can be one of the following: alerts, alertsv2, authorization,
* clusters, cluster-policies, dashboards, dbsql-dashboards, directories, experiments, files,
- * instance-pools, jobs, notebooks, pipelines, queries, registered-models, repos,
+ * genie, instance-pools, jobs, notebooks, pipelines, queries, registered-models, repos,
* serving-endpoints, or warehouses.
*/
@JsonIgnore private String requestObjectType;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/AwsKeyInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/AwsKeyInfo.java
index aeaa56801..9cd72cb1f 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/AwsKeyInfo.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/AwsKeyInfo.java
@@ -22,9 +22,9 @@ public class AwsKeyInfo {
private String keyRegion;
/**
- * This field applies only if the `use_cases` property includes `STORAGE`. If this is set to
- * `true` or omitted, the key is also used to encrypt cluster EBS volumes. If you do not want to
- * use this key for encrypting EBS volumes, set to `false`.
+ * This field applies only if the `use_cases` property includes `STORAGE`. If this is set to true
+ * or omitted, the key is also used to encrypt cluster EBS volumes. If you do not want to use this
+ * key for encrypting EBS volumes, set to false.
*/
@JsonProperty("reuse_key_for_cluster_volumes")
private Boolean reuseKeyForClusterVolumes;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/AzureKeyInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/AzureKeyInfo.java
new file mode 100755
index 000000000..69ababff2
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/AzureKeyInfo.java
@@ -0,0 +1,126 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.provisioning;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class AzureKeyInfo {
+ /**
+ * The Disk Encryption Set id that is used to represent the key info used for the Managed Disk
+ * BYOK use case.
+ */
+ @JsonProperty("disk_encryption_set_id")
+ private String diskEncryptionSetId;
+
+ /**
+ * The structure to store key access credential. This is set if the Managed Identity is being
+ * used to access the Azure Key Vault key.
+ */
+ @JsonProperty("key_access_configuration")
+ private KeyAccessConfiguration keyAccessConfiguration;
+
+ /** The name of the key in KeyVault. */
+ @JsonProperty("key_name")
+ private String keyName;
+
+ /** The base URI of the KeyVault. */
+ @JsonProperty("key_vault_uri")
+ private String keyVaultUri;
+
+ /** The tenant id where the KeyVault lives. */
+ @JsonProperty("tenant_id")
+ private String tenantId;
+
+ /** The current key version. */
+ @JsonProperty("version")
+ private String version;
+
+ public AzureKeyInfo setDiskEncryptionSetId(String diskEncryptionSetId) {
+ this.diskEncryptionSetId = diskEncryptionSetId;
+ return this;
+ }
+
+ public String getDiskEncryptionSetId() {
+ return diskEncryptionSetId;
+ }
+
+ public AzureKeyInfo setKeyAccessConfiguration(KeyAccessConfiguration keyAccessConfiguration) {
+ this.keyAccessConfiguration = keyAccessConfiguration;
+ return this;
+ }
+
+ public KeyAccessConfiguration getKeyAccessConfiguration() {
+ return keyAccessConfiguration;
+ }
+
+ public AzureKeyInfo setKeyName(String keyName) {
+ this.keyName = keyName;
+ return this;
+ }
+
+ public String getKeyName() {
+ return keyName;
+ }
+
+ public AzureKeyInfo setKeyVaultUri(String keyVaultUri) {
+ this.keyVaultUri = keyVaultUri;
+ return this;
+ }
+
+ public String getKeyVaultUri() {
+ return keyVaultUri;
+ }
+
+ public AzureKeyInfo setTenantId(String tenantId) {
+ this.tenantId = tenantId;
+ return this;
+ }
+
+ public String getTenantId() {
+ return tenantId;
+ }
+
+ public AzureKeyInfo setVersion(String version) {
+ this.version = version;
+ return this;
+ }
+
+ public String getVersion() {
+ return version;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ AzureKeyInfo that = (AzureKeyInfo) o;
+ return Objects.equals(diskEncryptionSetId, that.diskEncryptionSetId)
+ && Objects.equals(keyAccessConfiguration, that.keyAccessConfiguration)
+ && Objects.equals(keyName, that.keyName)
+ && Objects.equals(keyVaultUri, that.keyVaultUri)
+ && Objects.equals(tenantId, that.tenantId)
+ && Objects.equals(version, that.version);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(
+ diskEncryptionSetId, keyAccessConfiguration, keyName, keyVaultUri, tenantId, version);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(AzureKeyInfo.class)
+ .add("diskEncryptionSetId", diskEncryptionSetId)
+ .add("keyAccessConfiguration", keyAccessConfiguration)
+ .add("keyName", keyName)
+ .add("keyVaultUri", keyVaultUri)
+ .add("tenantId", tenantId)
+ .add("version", version)
+ .toString();
+ }
+}
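
A sketch of a populated `AzureKeyInfo`; every value below is a placeholder. `key_access_configuration` is set only when a managed identity accesses the vault, so it is omitted:

```java
import com.databricks.sdk.service.provisioning.AzureKeyInfo;

public class AzureKeyInfoSketch {
  public static void main(String[] args) {
    AzureKeyInfo keyInfo =
        new AzureKeyInfo()
            .setKeyVaultUri("https://example-vault.vault.azure.net/") // placeholder vault URI
            .setKeyName("example-cmk") // placeholder key name
            .setVersion("0123456789abcdef") // placeholder key version
            .setTenantId("00000000-0000-0000-0000-000000000000"); // placeholder tenant id
    System.out.println(keyInfo);
  }
}
```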
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CloudResourceContainer.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CloudResourceContainer.java
index ca8290c0b..721620a58 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CloudResourceContainer.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CloudResourceContainer.java
@@ -7,7 +7,6 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
-/** The general workspace configurations that are specific to cloud providers. */
@Generated
public class CloudResourceContainer {
/** */
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateAwsKeyInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateAwsKeyInfo.java
index ae5ac50f5..6478ac476 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateAwsKeyInfo.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateAwsKeyInfo.java
@@ -13,17 +13,18 @@ public class CreateAwsKeyInfo {
@JsonProperty("key_alias")
private String keyAlias;
- /**
- * The AWS KMS key's Amazon Resource Name (ARN). Note that the key's AWS region is inferred from
- * the ARN.
- */
+ /** The AWS KMS key's Amazon Resource Name (ARN). */
@JsonProperty("key_arn")
private String keyArn;
+ /** The AWS KMS key region. */
+ @JsonProperty("key_region")
+ private String keyRegion;
+
/**
- * This field applies only if the `use_cases` property includes `STORAGE`. If this is set to
- * `true` or omitted, the key is also used to encrypt cluster EBS volumes. To not use this key
- * also for encrypting EBS volumes, set this to `false`.
+ * This field applies only if the `use_cases` property includes `STORAGE`. If this is set to true
+ * or omitted, the key is also used to encrypt cluster EBS volumes. If you do not want to use this
+ * key for encrypting EBS volumes, set to false.
*/
@JsonProperty("reuse_key_for_cluster_volumes")
private Boolean reuseKeyForClusterVolumes;
@@ -46,6 +47,15 @@ public String getKeyArn() {
return keyArn;
}
+ public CreateAwsKeyInfo setKeyRegion(String keyRegion) {
+ this.keyRegion = keyRegion;
+ return this;
+ }
+
+ public String getKeyRegion() {
+ return keyRegion;
+ }
+
public CreateAwsKeyInfo setReuseKeyForClusterVolumes(Boolean reuseKeyForClusterVolumes) {
this.reuseKeyForClusterVolumes = reuseKeyForClusterVolumes;
return this;
@@ -62,12 +72,13 @@ public boolean equals(Object o) {
CreateAwsKeyInfo that = (CreateAwsKeyInfo) o;
return Objects.equals(keyAlias, that.keyAlias)
&& Objects.equals(keyArn, that.keyArn)
+ && Objects.equals(keyRegion, that.keyRegion)
&& Objects.equals(reuseKeyForClusterVolumes, that.reuseKeyForClusterVolumes);
}
@Override
public int hashCode() {
- return Objects.hash(keyAlias, keyArn, reuseKeyForClusterVolumes);
+ return Objects.hash(keyAlias, keyArn, keyRegion, reuseKeyForClusterVolumes);
}
@Override
@@ -75,6 +86,7 @@ public String toString() {
return new ToStringer(CreateAwsKeyInfo.class)
.add("keyAlias", keyAlias)
.add("keyArn", keyArn)
+ .add("keyRegion", keyRegion)
.add("reuseKeyForClusterVolumes", reuseKeyForClusterVolumes)
.toString();
}
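
The behavioral change here: the key's region is no longer inferred from the ARN and instead travels explicitly in the new `key_region` field. A sketch with placeholder ARN and region:

```java
import com.databricks.sdk.service.provisioning.CreateAwsKeyInfo;

public class CreateAwsKeyInfoSketch {
  public static void main(String[] args) {
    CreateAwsKeyInfo keyInfo =
        new CreateAwsKeyInfo()
            .setKeyArn("arn:aws:kms:us-west-2:111122223333:key/placeholder") // placeholder ARN
            .setKeyRegion("us-west-2") // now explicit; no longer inferred from the ARN
            .setReuseKeyForClusterVolumes(true);
    System.out.println(keyInfo);
  }
}
```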
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateCredentialStsRole.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateCredentialStsRole.java
index 461005798..c31784e77 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateCredentialStsRole.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateCredentialStsRole.java
@@ -9,7 +9,7 @@
@Generated
public class CreateCredentialStsRole {
- /** The Amazon Resource Name (ARN) of the cross account role. */
+ /** The Amazon Resource Name (ARN) of the cross account IAM role. */
@JsonProperty("role_arn")
private String roleArn;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateGcpKeyInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateGcpKeyInfo.java
index 2d62f0470..f070c7a31 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateGcpKeyInfo.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateGcpKeyInfo.java
@@ -9,7 +9,10 @@
@Generated
public class CreateGcpKeyInfo {
- /** The GCP KMS key's resource name */
+ /**
+ * Globally unique kms key resource id of the form
+ * projects/testProjectId/locations/us-east4/keyRings/gcpCmkKeyRing/cryptoKeys/cmk-eastus4
+ */
@JsonProperty("kms_key_id")
private String kmsKeyId;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateNetworkRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateNetworkRequest.java
index 9dff47e10..65b9799c3 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateNetworkRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateNetworkRequest.java
@@ -37,8 +37,8 @@ public class CreateNetworkRequest {
private NetworkVpcEndpoints vpcEndpoints;
/**
- * The ID of the VPC associated with this network. VPC IDs can be used in multiple network
- * configurations.
+ * The ID of the VPC associated with this network configuration. VPC IDs can be used in multiple
+ * networks.
*/
@JsonProperty("vpc_id")
private String vpcId;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreatePrivateAccessSettingsRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreatePrivateAccessSettingsRequest.java
index aed606c1f..bf8d4bd70 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreatePrivateAccessSettingsRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreatePrivateAccessSettingsRequest.java
@@ -11,23 +11,24 @@
@Generated
public class CreatePrivateAccessSettingsRequest {
/**
- * An array of Databricks VPC endpoint IDs. This is the Databricks ID that is returned when
- * registering the VPC endpoint configuration in your Databricks account. This is not the ID of
- * the VPC endpoint in AWS.
- *
- * Only used when `private_access_level` is set to `ENDPOINT`. This is an allow list of VPC
- * endpoints that in your account that can connect to your workspace over AWS PrivateLink.
- *
- * <p>If hybrid access to your workspace is enabled by setting `public_access_enabled` to `true`,
- * this control only works for PrivateLink connections. To control how your workspace is accessed
- * via public internet, see [IP access lists].
- *
- * <p>[IP access lists]: https://docs.databricks.com/security/network/ip-access-list.html
+ * An array of Databricks VPC endpoint IDs. This is the Databricks ID returned when registering
+ * the VPC endpoint configuration in your Databricks account. This is not the ID of the VPC
+ * endpoint in AWS. Only used when private_access_level is set to ENDPOINT. This is an allow list
+ * of VPC endpoints registered in your Databricks account that can connect to your workspace over
+ * AWS PrivateLink. Note: If hybrid access to your workspace is enabled by setting
+ * public_access_enabled to true, this control only works for PrivateLink connections. To control
+ * how your workspace is accessed via public internet, see IP access lists.
*/
@JsonProperty("allowed_vpc_endpoint_ids")
+ private Collection<String> allowedVpcEndpointIds;
- /** */
+ /**
+ * The private access level controls which VPC endpoints can connect to the UI or API of any
+ * workspace that attaches this private access settings object. `ACCOUNT` level access (the
+ * default) allows only VPC endpoints that are registered in your Databricks account to connect
+ * to your workspace. `ENDPOINT` level access allows only specified VPC endpoints to connect to
+ * your workspace. For details, see allowed_vpc_endpoint_ids.
+ */
@JsonProperty("private_access_level")
private PrivateAccessLevel privateAccessLevel;
@@ -37,14 +38,13 @@ public class CreatePrivateAccessSettingsRequest {
/**
* Determines if the workspace can be accessed over public internet. For fully private workspaces,
- * you can optionally specify `false`, but only if you implement both the front-end and the
- * back-end PrivateLink connections. Otherwise, specify `true`, which means that public access is
- * enabled.
+ * you can optionally specify false, but only if you implement both the front-end and the back-end
+ * PrivateLink connections. Otherwise, specify true, which means that public access is enabled.
*/
@JsonProperty("public_access_enabled")
private Boolean publicAccessEnabled;
- /** The cloud region for workspaces associated with this private access settings object. */
+ /** The AWS region for workspaces attached to this private access settings object. */
@JsonProperty("region")
private String region;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateStorageConfigurationRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateStorageConfigurationRequest.java
index 17bbcebc8..5c1e21eeb 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateStorageConfigurationRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateStorageConfigurationRequest.java
@@ -9,7 +9,17 @@
@Generated
public class CreateStorageConfigurationRequest {
- /** */
+ /**
+ * Optional IAM role that is used to access the workspace catalog which is created during
+ * workspace creation for UC by Default. If a storage configuration with this field populated is
+ * used to create a workspace, then a workspace catalog is created together with the workspace.
+ * The workspace catalog shares the root bucket with internal workspace storage (including DBFS
+ * root) but uses a dedicated bucket path prefix.
+ */
+ @JsonProperty("role_arn")
+ private String roleArn;
+
+ /** Root S3 bucket information. */
@JsonProperty("root_bucket_info")
private RootBucketInfo rootBucketInfo;
@@ -17,6 +27,15 @@ public class CreateStorageConfigurationRequest {
@JsonProperty("storage_configuration_name")
private String storageConfigurationName;
+ public CreateStorageConfigurationRequest setRoleArn(String roleArn) {
+ this.roleArn = roleArn;
+ return this;
+ }
+
+ public String getRoleArn() {
+ return roleArn;
+ }
+
public CreateStorageConfigurationRequest setRootBucketInfo(RootBucketInfo rootBucketInfo) {
this.rootBucketInfo = rootBucketInfo;
return this;
@@ -41,18 +60,20 @@ public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
CreateStorageConfigurationRequest that = (CreateStorageConfigurationRequest) o;
- return Objects.equals(rootBucketInfo, that.rootBucketInfo)
+ return Objects.equals(roleArn, that.roleArn)
+ && Objects.equals(rootBucketInfo, that.rootBucketInfo)
&& Objects.equals(storageConfigurationName, that.storageConfigurationName);
}
@Override
public int hashCode() {
- return Objects.hash(rootBucketInfo, storageConfigurationName);
+ return Objects.hash(roleArn, rootBucketInfo, storageConfigurationName);
}
@Override
public String toString() {
return new ToStringer(CreateStorageConfigurationRequest.class)
+ .add("roleArn", roleArn)
.add("rootBucketInfo", rootBucketInfo)
.add("storageConfigurationName", storageConfigurationName)
.toString();
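
The new optional `role_arn` rides alongside the existing root-bucket info; when populated, workspace creation also provisions the workspace catalog. A sketch with placeholder names, assuming `RootBucketInfo` exposes a `setBucketName` setter in the usual generated style (that class is not shown in this diff, and the other setters here follow the same generated pattern):

```java
import com.databricks.sdk.service.provisioning.CreateStorageConfigurationRequest;
import com.databricks.sdk.service.provisioning.RootBucketInfo;

public class StorageConfigSketch {
  public static void main(String[] args) {
    CreateStorageConfigurationRequest request =
        new CreateStorageConfigurationRequest()
            .setStorageConfigurationName("example-storage-config") // placeholder
            .setRootBucketInfo(new RootBucketInfo().setBucketName("example-root-bucket"))
            // Optional: triggers creation of the workspace catalog for UC by Default.
            .setRoleArn("arn:aws:iam::111122223333:role/example-role"); // placeholder ARN
    System.out.println(request);
  }
}
```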
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateVpcEndpointRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateVpcEndpointRequest.java
index 00ed46856..4c13c8d3e 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateVpcEndpointRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateVpcEndpointRequest.java
@@ -13,11 +13,11 @@ public class CreateVpcEndpointRequest {
@JsonProperty("aws_vpc_endpoint_id")
private String awsVpcEndpointId;
- /** */
+ /** The cloud info of this vpc endpoint. */
@JsonProperty("gcp_vpc_endpoint_info")
private GcpVpcEndpointInfo gcpVpcEndpointInfo;
- /** The AWS region in which this VPC endpoint object exists. */
+ /** The region in which this VPC endpoint object exists. */
@JsonProperty("region")
private String region;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateWorkspaceRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateWorkspaceRequest.java
index 31d107a91..4b4fdfd43 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateWorkspaceRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CreateWorkspaceRequest.java
@@ -10,14 +10,11 @@
@Generated
public class CreateWorkspaceRequest {
- /** The AWS region of the workspace's data plane. */
+ /** */
@JsonProperty("aws_region")
private String awsRegion;
- /**
- * The cloud provider which the workspace uses. For Google Cloud workspaces, always set this field
- * to `gcp`.
- */
+ /** The cloud name. This field always has the value `gcp`. */
@JsonProperty("cloud")
private String cloud;
@@ -39,28 +36,21 @@ public class CreateWorkspaceRequest {
/**
* The deployment name defines part of the subdomain for the workspace. The workspace URL for the
- * web application and REST APIs is `<workspace-deployment-name>.cloud.databricks.com`. For
- * example, if the deployment name is `abcsales`, your workspace URL will be
- * `https://abcsales.cloud.databricks.com`. Hyphens are allowed. This property supports only the
- * set of characters that are allowed in a subdomain.
- *
- * To set this value, you must have a deployment name prefix. Contact your Databricks account
- * team to add an account deployment name prefix to your account.
- *
- * <p>Workspace deployment names follow the account prefix and a hyphen. For example, if your
- * account's deployment prefix is `acme` and the workspace deployment name is `workspace-1`, the
- * JSON response for the `deployment_name` field becomes `acme-workspace-1`. The workspace URL
- * would be `acme-workspace-1.cloud.databricks.com`.
- *
- * <p>You can also set the `deployment_name` to the reserved keyword `EMPTY` if you want the
- * deployment name to only include the deployment prefix. For example, if your account's
- * deployment prefix is `acme` and the workspace deployment name is `EMPTY`, the `deployment_name`
- * becomes `acme` only and the workspace URL is `acme.cloud.databricks.com`.
- *
- * <p>This value must be unique across all non-deleted deployments across all AWS regions.
- *
- * <p>If a new workspace omits this property, the server generates a unique deployment name for
- * you with the pattern `dbc-xxxxxxxx-xxxx`.
+ * web application and REST APIs is <workspace-deployment-name>.cloud.databricks.com. For example,
+ * if the deployment name is abcsales, your workspace URL will be
+ * https://abcsales.cloud.databricks.com. Hyphens are allowed. This property supports only the set
+ * of characters that are allowed in a subdomain. To set this value, you must have a deployment
+ * name prefix. Contact your Databricks account team to add an account deployment name prefix to
+ * your account. Workspace deployment names follow the account prefix and a hyphen. For example,
+ * if your account's deployment prefix is acme and the workspace deployment name is workspace-1,
+ * the JSON response for the deployment_name field becomes acme-workspace-1. The workspace URL
+ * would be acme-workspace-1.cloud.databricks.com. You can also set the deployment_name to the
+ * reserved keyword EMPTY if you want the deployment name to only include the deployment prefix.
+ * For example, if your account's deployment prefix is acme and the workspace deployment name is
+ * EMPTY, the deployment_name becomes acme only and the workspace URL is
+ * acme.cloud.databricks.com. This value must be unique across all non-deleted deployments across
+ * all AWS regions. If a new workspace omits this property, the server generates a unique
+ * deployment name for you with the pattern dbc-xxxxxxxx-xxxx.
*/
@JsonProperty("deployment_name")
private String deploymentName;
@@ -73,13 +63,9 @@ public class CreateWorkspaceRequest {
@JsonProperty("gke_config")
private GkeConfig gkeConfig;
- /** Whether no public IP is enabled for the workspace. */
- @JsonProperty("is_no_public_ip_enabled")
- private Boolean isNoPublicIpEnabled;
-
/**
- * The Google Cloud region of the workspace data plane in your Google account. For example,
- * `us-east4`.
+ * The Google Cloud region of the workspace data plane in your Google account (for example,
+ * `us-east4`).
*/
@JsonProperty("location")
private String location;
@@ -87,13 +73,16 @@ public class CreateWorkspaceRequest {
/**
* The ID of the workspace's managed services encryption key configuration object. This is used to
* help protect and control access to the workspace's notebooks, secrets, Databricks SQL queries,
- * and query history. The provided key configuration object property `use_cases` must contain
- * `MANAGED_SERVICES`.
+ * and query history. The provided key configuration object property use_cases must contain
+ * MANAGED_SERVICES.
*/
@JsonProperty("managed_services_customer_managed_key_id")
private String managedServicesCustomerManagedKeyId;
- /** */
+ /**
+ * The ID of the workspace's network configuration object. To use AWS PrivateLink, this field is
+ * required.
+ */
@JsonProperty("network_id")
private String networkId;
@@ -102,11 +91,10 @@ public class CreateWorkspaceRequest {
private PricingTier pricingTier;
/**
- * ID of the workspace's private access settings object. Only used for PrivateLink. This ID must
- * be specified for customers using [AWS PrivateLink] for either front-end (user-to-workspace
+ * ID of the workspace's private access settings object. Only used for PrivateLink. You must
+ * specify this ID if you are using [AWS PrivateLink] for either front-end (user-to-workspace
* connection), back-end (data plane to control plane connection), or both connection types.
- *
- * Before configuring PrivateLink, read the [Databricks article about PrivateLink].",
+ * Before configuring PrivateLink, read the [Databricks article about PrivateLink].",
*
* <p>[AWS PrivateLink]: https://aws.amazon.com/privatelink/ [Databricks article about
[AWS PrivateLink]: https://aws.amazon.com/privatelink/ [Databricks article about
* PrivateLink]:
@@ -115,19 +103,19 @@ public class CreateWorkspaceRequest {
@JsonProperty("private_access_settings_id")
private String privateAccessSettingsId;
- /** The ID of the workspace's storage configuration object. */
+ /** ID of the workspace's storage configuration object. */
@JsonProperty("storage_configuration_id")
private String storageConfigurationId;
/**
* The ID of the workspace's storage encryption key configuration object. This is used to encrypt
* the workspace's root S3 bucket (root DBFS and system data) and, optionally, cluster EBS
- * volumes. The provided key configuration object property `use_cases` must contain `STORAGE`.
+ * volumes. The provided key configuration object property use_cases must contain STORAGE.
*/
@JsonProperty("storage_customer_managed_key_id")
private String storageCustomerManagedKeyId;
- /** The workspace's human-readable name. */
+ /** The human-readable name of the workspace. */
@JsonProperty("workspace_name")
private String workspaceName;
@@ -205,15 +193,6 @@ public GkeConfig getGkeConfig() {
return gkeConfig;
}
- public CreateWorkspaceRequest setIsNoPublicIpEnabled(Boolean isNoPublicIpEnabled) {
- this.isNoPublicIpEnabled = isNoPublicIpEnabled;
- return this;
- }
-
- public Boolean getIsNoPublicIpEnabled() {
- return isNoPublicIpEnabled;
- }
-
public CreateWorkspaceRequest setLocation(String location) {
this.location = location;
return this;
@@ -300,7 +279,6 @@ public boolean equals(Object o) {
&& Objects.equals(deploymentName, that.deploymentName)
&& Objects.equals(gcpManagedNetworkConfig, that.gcpManagedNetworkConfig)
&& Objects.equals(gkeConfig, that.gkeConfig)
- && Objects.equals(isNoPublicIpEnabled, that.isNoPublicIpEnabled)
&& Objects.equals(location, that.location)
&& Objects.equals(
managedServicesCustomerManagedKeyId, that.managedServicesCustomerManagedKeyId)
@@ -323,7 +301,6 @@ public int hashCode() {
deploymentName,
gcpManagedNetworkConfig,
gkeConfig,
- isNoPublicIpEnabled,
location,
managedServicesCustomerManagedKeyId,
networkId,
@@ -345,7 +322,6 @@ public String toString() {
.add("deploymentName", deploymentName)
.add("gcpManagedNetworkConfig", gcpManagedNetworkConfig)
.add("gkeConfig", gkeConfig)
- .add("isNoPublicIpEnabled", isNoPublicIpEnabled)
.add("location", location)
.add("managedServicesCustomerManagedKeyId", managedServicesCustomerManagedKeyId)
.add("networkId", networkId)
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CredentialsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CredentialsAPI.java
index 40ca82451..035ea5659 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CredentialsAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CredentialsAPI.java
@@ -48,16 +48,16 @@ public Credential create(CreateCredentialRequest request) {
return impl.create(request);
}
- public void delete(String credentialsId) {
- delete(new DeleteCredentialRequest().setCredentialsId(credentialsId));
+ public Credential delete(String credentialsId) {
+ return delete(new DeleteCredentialRequest().setCredentialsId(credentialsId));
}
/**
* Deletes a Databricks credential configuration object for an account, both specified by ID. You
* cannot delete a credential that is associated with any workspace.
*/
- public void delete(DeleteCredentialRequest request) {
- impl.delete(request);
+ public Credential delete(DeleteCredentialRequest request) {
+ return impl.delete(request);
}
public Credential get(String credentialsId) {
@@ -69,7 +69,7 @@ public Credential get(GetCredentialRequest request) {
return impl.get(request);
}
- /** Gets all Databricks credential configurations associated with an account specified by ID. */
+ /** List Databricks credential configuration objects for an account, specified by ID. */
public Iterable<Credential> list() {
return impl.list();
}
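
The breaking change in this hunk is the return type: `delete` now returns the deleted `Credential` instead of `void`, so existing callers compile unchanged while new callers can capture the final state. A sketch, assuming an already-constructed `CredentialsAPI` (construction is outside this hunk):

```java
import com.databricks.sdk.service.provisioning.Credential;
import com.databricks.sdk.service.provisioning.CredentialsAPI;

public class CredentialDeleteSketch {
  // `credentials` must be an already-constructed CredentialsAPI instance.
  static void deleteAndLog(CredentialsAPI credentials, String credentialsId) {
    Credential deleted = credentials.delete(credentialsId); // previously returned void
    System.out.println("Deleted credential configuration: " + deleted);
  }
}
```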
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CredentialsImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CredentialsImpl.java
index 4aca2d8bb..029b5e209 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CredentialsImpl.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CredentialsImpl.java
@@ -33,7 +33,7 @@ public Credential create(CreateCredentialRequest request) {
}
@Override
- public void delete(DeleteCredentialRequest request) {
+ public Credential delete(DeleteCredentialRequest request) {
String path =
String.format(
"/api/2.0/accounts/%s/credentials/%s",
@@ -42,7 +42,7 @@ public void delete(DeleteCredentialRequest request) {
Request req = new Request("DELETE", path);
ApiClient.setQuery(req, request);
req.withHeader("Accept", "application/json");
- apiClient.execute(req, Void.class);
+ return apiClient.execute(req, Credential.class);
} catch (IOException e) {
throw new DatabricksException("IO error: " + e.getMessage(), e);
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CredentialsService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CredentialsService.java
index 620a274b6..31a55161c 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CredentialsService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CredentialsService.java
@@ -38,11 +38,11 @@ public interface CredentialsService {
* Deletes a Databricks credential configuration object for an account, both specified by ID. You
* cannot delete a credential that is associated with any workspace.
*/
- void delete(DeleteCredentialRequest deleteCredentialRequest);
+ Credential delete(DeleteCredentialRequest deleteCredentialRequest);
/** Gets a Databricks credential configuration object for an account, both specified by ID. */
Credential get(GetCredentialRequest getCredentialRequest);
- /** Gets all Databricks credential configurations associated with an account specified by ID. */
+ /** List Databricks credential configuration objects for an account, specified by ID. */
Collection<Credential> list();
}
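
Note: across these provisioning services, `delete` now returns the deleted configuration object instead of `void`. A minimal usage sketch, assuming account-level auth configured via the environment; the client setup and the credential ID are illustrative, not part of this diff:

```java
import com.databricks.sdk.AccountClient;
import com.databricks.sdk.service.provisioning.Credential;

public class DeleteCredentialExample {
  public static void main(String[] args) {
    // Assumes host, account ID, and credentials come from the environment.
    AccountClient account = new AccountClient();

    // delete(...) now returns the deleted Credential instead of void,
    // so the last-known state of the configuration can be inspected or logged.
    Credential deleted = account.credentials().delete("ccc-0000-placeholder");
    System.out.println("Deleted credential: " + deleted.getCredentialsId());
  }
}
```
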
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CustomerFacingComputeMode.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CustomerFacingComputeMode.java
new file mode 100755
index 000000000..e6b581f25
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CustomerFacingComputeMode.java
@@ -0,0 +1,15 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.provisioning;
+
+import com.databricks.sdk.support.Generated;
+
+/**
+ * Corresponds to compute mode defined here:
+ * https://src.dev.databricks.com/databricks/universe@9076536b18479afd639d1c1f9dd5a59f72215e69/-/blob/central/api/common.proto?L872
+ */
+@Generated
+public enum CustomerFacingComputeMode {
+ HYBRID,
+ SERVERLESS,
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CustomerFacingGcpCloudResourceContainer.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CustomerFacingGcpCloudResourceContainer.java
index 5781e69d3..cf3d76e0c 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CustomerFacingGcpCloudResourceContainer.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CustomerFacingGcpCloudResourceContainer.java
@@ -7,13 +7,9 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
-/** The general workspace configurations that are specific to Google Cloud. */
@Generated
public class CustomerFacingGcpCloudResourceContainer {
- /**
- * The Google Cloud project ID, which the workspace uses to instantiate cloud resources for your
- * workspace.
- */
+ /** */
@JsonProperty("project_id")
private String projectId;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CustomerFacingStorageMode.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CustomerFacingStorageMode.java
new file mode 100755
index 000000000..db8196275
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CustomerFacingStorageMode.java
@@ -0,0 +1,11 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.provisioning;
+
+import com.databricks.sdk.support.Generated;
+
+@Generated
+public enum CustomerFacingStorageMode {
+ CUSTOMER_HOSTED,
+ DEFAULT_STORAGE,
+}
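
The two new enums above are plain value types; how they attach to workspace objects is not shown in this diff. A purely illustrative sketch of branching on `CustomerFacingStorageMode`:

```java
import com.databricks.sdk.service.provisioning.CustomerFacingStorageMode;

public class StorageModeExample {
  // Illustrative only: decide whether a customer-supplied root bucket is
  // needed based on the new enum. The mapping here is an assumption.
  static boolean requiresCustomerBucket(CustomerFacingStorageMode mode) {
    switch (mode) {
      case CUSTOMER_HOSTED:
        return true; // customer supplies and manages the storage
      case DEFAULT_STORAGE:
        return false; // Databricks-managed default storage
      default:
        throw new IllegalArgumentException("Unknown storage mode: " + mode);
    }
  }

  public static void main(String[] args) {
    System.out.println(requiresCustomerBucket(CustomerFacingStorageMode.DEFAULT_STORAGE));
  }
}
```
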
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CustomerManagedKey.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CustomerManagedKey.java
index 40da65d77..737b2a075 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CustomerManagedKey.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/CustomerManagedKey.java
@@ -18,6 +18,10 @@ public class CustomerManagedKey {
@JsonProperty("aws_key_info")
private AwsKeyInfo awsKeyInfo;
+ /** */
+ @JsonProperty("azure_key_info")
+ private AzureKeyInfo azureKeyInfo;
+
/** Time in epoch milliseconds when the customer key was created. */
@JsonProperty("creation_time")
private Long creationTime;
@@ -52,6 +56,15 @@ public AwsKeyInfo getAwsKeyInfo() {
return awsKeyInfo;
}
+ public CustomerManagedKey setAzureKeyInfo(AzureKeyInfo azureKeyInfo) {
+ this.azureKeyInfo = azureKeyInfo;
+ return this;
+ }
+
+ public AzureKeyInfo getAzureKeyInfo() {
+ return azureKeyInfo;
+ }
+
public CustomerManagedKey setCreationTime(Long creationTime) {
this.creationTime = creationTime;
return this;
@@ -95,6 +108,7 @@ public boolean equals(Object o) {
CustomerManagedKey that = (CustomerManagedKey) o;
return Objects.equals(accountId, that.accountId)
&& Objects.equals(awsKeyInfo, that.awsKeyInfo)
+ && Objects.equals(azureKeyInfo, that.azureKeyInfo)
&& Objects.equals(creationTime, that.creationTime)
&& Objects.equals(customerManagedKeyId, that.customerManagedKeyId)
&& Objects.equals(gcpKeyInfo, that.gcpKeyInfo)
@@ -104,7 +118,13 @@ public boolean equals(Object o) {
@Override
public int hashCode() {
return Objects.hash(
- accountId, awsKeyInfo, creationTime, customerManagedKeyId, gcpKeyInfo, useCases);
+ accountId,
+ awsKeyInfo,
+ azureKeyInfo,
+ creationTime,
+ customerManagedKeyId,
+ gcpKeyInfo,
+ useCases);
}
@Override
@@ -112,6 +132,7 @@ public String toString() {
return new ToStringer(CustomerManagedKey.class)
.add("accountId", accountId)
.add("awsKeyInfo", awsKeyInfo)
+ .add("azureKeyInfo", azureKeyInfo)
.add("creationTime", creationTime)
.add("customerManagedKeyId", customerManagedKeyId)
.add("gcpKeyInfo", gcpKeyInfo)
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/DeletePrivateAccesRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/DeletePrivateAccesRequest.java
index d58b68aa2..98d6f09f8 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/DeletePrivateAccesRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/DeletePrivateAccesRequest.java
@@ -9,7 +9,7 @@
@Generated
public class DeletePrivateAccesRequest {
- /** Databricks Account API private access settings ID. */
+ /** */
@JsonIgnore private String privateAccessSettingsId;
public DeletePrivateAccesRequest setPrivateAccessSettingsId(String privateAccessSettingsId) {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/DeleteStorageRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/DeleteStorageRequest.java
index 3ff26d6da..9c7f773f7 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/DeleteStorageRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/DeleteStorageRequest.java
@@ -9,7 +9,7 @@
@Generated
public class DeleteStorageRequest {
- /** Databricks Account API storage configuration ID. */
+ /** */
@JsonIgnore private String storageConfigurationId;
public DeleteStorageRequest setStorageConfigurationId(String storageConfigurationId) {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/DeleteVpcEndpointRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/DeleteVpcEndpointRequest.java
index 7a3ebaf99..7c61b9be2 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/DeleteVpcEndpointRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/DeleteVpcEndpointRequest.java
@@ -9,7 +9,7 @@
@Generated
public class DeleteVpcEndpointRequest {
- /** Databricks VPC endpoint ID. */
+ /** */
@JsonIgnore private String vpcEndpointId;
public DeleteVpcEndpointRequest setVpcEndpointId(String vpcEndpointId) {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/DeleteWorkspaceRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/DeleteWorkspaceRequest.java
index 3ee6f1ee3..f5ede1c3e 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/DeleteWorkspaceRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/DeleteWorkspaceRequest.java
@@ -9,7 +9,7 @@
@Generated
public class DeleteWorkspaceRequest {
- /** Workspace ID. */
+ /** */
@JsonIgnore private Long workspaceId;
public DeleteWorkspaceRequest setWorkspaceId(Long workspaceId) {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/EncryptionKeysAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/EncryptionKeysAPI.java
index cc89ea513..a9c6d4ad7 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/EncryptionKeysAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/EncryptionKeysAPI.java
@@ -56,16 +56,16 @@ public CustomerManagedKey create(CreateCustomerManagedKeyRequest request) {
return impl.create(request);
}
- public void delete(String customerManagedKeyId) {
- delete(new DeleteEncryptionKeyRequest().setCustomerManagedKeyId(customerManagedKeyId));
+ public CustomerManagedKey delete(String customerManagedKeyId) {
+ return delete(new DeleteEncryptionKeyRequest().setCustomerManagedKeyId(customerManagedKeyId));
}
/**
* Deletes a customer-managed key configuration object for an account. You cannot delete a
* configuration that is associated with a running workspace.
*/
- public void delete(DeleteEncryptionKeyRequest request) {
- impl.delete(request);
+ public CustomerManagedKey delete(DeleteEncryptionKeyRequest request) {
+ return impl.delete(request);
}
public CustomerManagedKey get(String customerManagedKeyId) {
@@ -90,19 +90,7 @@ public CustomerManagedKey get(GetEncryptionKeyRequest request) {
return impl.get(request);
}
- /**
- * Gets all customer-managed key configuration objects for an account. If the key is specified as
- * a workspace's managed services customer-managed key, Databricks uses the key to encrypt the
- * workspace's notebooks and secrets in the control plane, in addition to Databricks SQL queries
- * and query history. If the key is specified as a workspace's storage customer-managed key, the
- * key is used to encrypt the workspace's root S3 bucket and optionally can encrypt cluster EBS
- * volumes data in the data plane.
- *
- * **Important**: Customer-managed keys are supported only for some deployment types,
- * subscription types, and AWS regions.
- *
- * This operation is available only if your account is on the E2 version of the platform.
- */
+ /** Lists Databricks customer-managed key configurations for an account. */
public Iterable<CustomerManagedKey> list() {
return impl.list();
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/EncryptionKeysImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/EncryptionKeysImpl.java
index 20424aec0..22cf9aa46 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/EncryptionKeysImpl.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/EncryptionKeysImpl.java
@@ -34,7 +34,7 @@ public CustomerManagedKey create(CreateCustomerManagedKeyRequest request) {
}
@Override
- public void delete(DeleteEncryptionKeyRequest request) {
+ public CustomerManagedKey delete(DeleteEncryptionKeyRequest request) {
String path =
String.format(
"/api/2.0/accounts/%s/customer-managed-keys/%s",
@@ -43,7 +43,7 @@ public void delete(DeleteEncryptionKeyRequest request) {
Request req = new Request("DELETE", path);
ApiClient.setQuery(req, request);
req.withHeader("Accept", "application/json");
- apiClient.execute(req, Void.class);
+ return apiClient.execute(req, CustomerManagedKey.class);
} catch (IOException e) {
throw new DatabricksException("IO error: " + e.getMessage(), e);
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/EncryptionKeysService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/EncryptionKeysService.java
index 083d083ae..e9741ccb3 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/EncryptionKeysService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/EncryptionKeysService.java
@@ -46,7 +46,7 @@ public interface EncryptionKeysService {
* Deletes a customer-managed key configuration object for an account. You cannot delete a
* configuration that is associated with a running workspace.
*/
- void delete(DeleteEncryptionKeyRequest deleteEncryptionKeyRequest);
+ CustomerManagedKey delete(DeleteEncryptionKeyRequest deleteEncryptionKeyRequest);
/**
* Gets a customer-managed key configuration object for an account, specified by ID. This
@@ -64,18 +64,6 @@ public interface EncryptionKeysService {
*/
CustomerManagedKey get(GetEncryptionKeyRequest getEncryptionKeyRequest);
- /**
- * Gets all customer-managed key configuration objects for an account. If the key is specified as
- * a workspace's managed services customer-managed key, Databricks uses the key to encrypt the
- * workspace's notebooks and secrets in the control plane, in addition to Databricks SQL queries
- * and query history. If the key is specified as a workspace's storage customer-managed key, the
- * key is used to encrypt the workspace's root S3 bucket and optionally can encrypt cluster EBS
- * volumes data in the data plane.
- *
- * **Important**: Customer-managed keys are supported only for some deployment types,
- * subscription types, and AWS regions.
- *
- * This operation is available only if your account is on the E2 version of the platform.
- */
+ /** Lists Databricks customer-managed key configurations for an account. */
Collection<CustomerManagedKey> list();
}
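
As with credentials, deleting an encryption key configuration now echoes the deleted object back, which is useful for audit trails. A sketch under the same assumptions as the earlier credential example:

```java
import com.databricks.sdk.AccountClient;
import com.databricks.sdk.service.provisioning.CustomerManagedKey;

public class DeleteEncryptionKeyExample {
  public static void main(String[] args) {
    AccountClient account = new AccountClient();

    // The deleted configuration is returned, so its use cases
    // (MANAGED_SERVICES and/or STORAGE) remain inspectable afterwards.
    // The key ID is a placeholder.
    CustomerManagedKey deleted = account.encryptionKeys().delete("cmk-0000-placeholder");
    System.out.println("Deleted key was used for: " + deleted.getUseCases());
  }
}
```
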
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/EndpointUseCase.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/EndpointUseCase.java
index 8c3f268ba..db8f30d1a 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/EndpointUseCase.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/EndpointUseCase.java
@@ -4,12 +4,6 @@
import com.databricks.sdk.support.Generated;
-/**
- * This enumeration represents the type of Databricks VPC [endpoint service] that was used when
- * creating this VPC endpoint.
- *
- * [endpoint service]: https://docs.aws.amazon.com/vpc/latest/privatelink/endpoint-service.html
- */
@Generated
public enum EndpointUseCase {
DATAPLANE_RELAY_ACCESS,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/ErrorType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/ErrorType.java
index 38b0ddcdb..5ccb0ab61 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/ErrorType.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/ErrorType.java
@@ -6,8 +6,8 @@
import com.fasterxml.jackson.annotation.JsonProperty;
/**
- * The AWS resource associated with this error: credentials, VPC, subnet, security group, or network
- * ACL.
+ * ErrorType and WarningType are used to represent the type of error or warning by NetworkHealth and
+ * NetworkWarning defined in central/api/accounts/accounts.proto
*/
@Generated
public enum ErrorType {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/ExternalCustomerInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/ExternalCustomerInfo.java
deleted file mode 100755
index 7654c68e7..000000000
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/ExternalCustomerInfo.java
+++ /dev/null
@@ -1,74 +0,0 @@
-// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
-
-package com.databricks.sdk.service.provisioning;
-
-import com.databricks.sdk.support.Generated;
-import com.databricks.sdk.support.ToStringer;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import java.util.Objects;
-
-@Generated
-public class ExternalCustomerInfo {
- /** Email of the authoritative user. */
- @JsonProperty("authoritative_user_email")
- private String authoritativeUserEmail;
-
- /** The authoritative user full name. */
- @JsonProperty("authoritative_user_full_name")
- private String authoritativeUserFullName;
-
- /** The legal entity name for the external workspace */
- @JsonProperty("customer_name")
- private String customerName;
-
- public ExternalCustomerInfo setAuthoritativeUserEmail(String authoritativeUserEmail) {
- this.authoritativeUserEmail = authoritativeUserEmail;
- return this;
- }
-
- public String getAuthoritativeUserEmail() {
- return authoritativeUserEmail;
- }
-
- public ExternalCustomerInfo setAuthoritativeUserFullName(String authoritativeUserFullName) {
- this.authoritativeUserFullName = authoritativeUserFullName;
- return this;
- }
-
- public String getAuthoritativeUserFullName() {
- return authoritativeUserFullName;
- }
-
- public ExternalCustomerInfo setCustomerName(String customerName) {
- this.customerName = customerName;
- return this;
- }
-
- public String getCustomerName() {
- return customerName;
- }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) return true;
- if (o == null || getClass() != o.getClass()) return false;
- ExternalCustomerInfo that = (ExternalCustomerInfo) o;
- return Objects.equals(authoritativeUserEmail, that.authoritativeUserEmail)
- && Objects.equals(authoritativeUserFullName, that.authoritativeUserFullName)
- && Objects.equals(customerName, that.customerName);
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(authoritativeUserEmail, authoritativeUserFullName, customerName);
- }
-
- @Override
- public String toString() {
- return new ToStringer(ExternalCustomerInfo.class)
- .add("authoritativeUserEmail", authoritativeUserEmail)
- .add("authoritativeUserFullName", authoritativeUserFullName)
- .add("customerName", customerName)
- .toString();
- }
-}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpCommonNetworkConfig.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpCommonNetworkConfig.java
new file mode 100755
index 000000000..21b86acd9
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpCommonNetworkConfig.java
@@ -0,0 +1,67 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.provisioning;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+/**
+ * The shared network config for GCP workspace. This object has common network configurations that
+ * are network attributions of a workspace. DEPRECATED. Use GkeConfig instead.
+ */
+@Generated
+public class GcpCommonNetworkConfig {
+ /**
+ * The IP range that will be used to allocate GKE cluster master resources from. This field must
+ * not be set if gke_cluster_type=PUBLIC_NODE_PUBLIC_MASTER.
+ */
+ @JsonProperty("gke_cluster_master_ip_range")
+ private String gkeClusterMasterIpRange;
+
+ /** The type of network connectivity of the GKE cluster. */
+ @JsonProperty("gke_connectivity_type")
+ private GkeConfigConnectivityType gkeConnectivityType;
+
+ public GcpCommonNetworkConfig setGkeClusterMasterIpRange(String gkeClusterMasterIpRange) {
+ this.gkeClusterMasterIpRange = gkeClusterMasterIpRange;
+ return this;
+ }
+
+ public String getGkeClusterMasterIpRange() {
+ return gkeClusterMasterIpRange;
+ }
+
+ public GcpCommonNetworkConfig setGkeConnectivityType(
+ GkeConfigConnectivityType gkeConnectivityType) {
+ this.gkeConnectivityType = gkeConnectivityType;
+ return this;
+ }
+
+ public GkeConfigConnectivityType getGkeConnectivityType() {
+ return gkeConnectivityType;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ GcpCommonNetworkConfig that = (GcpCommonNetworkConfig) o;
+ return Objects.equals(gkeClusterMasterIpRange, that.gkeClusterMasterIpRange)
+ && Objects.equals(gkeConnectivityType, that.gkeConnectivityType);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(gkeClusterMasterIpRange, gkeConnectivityType);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(GcpCommonNetworkConfig.class)
+ .add("gkeClusterMasterIpRange", gkeClusterMasterIpRange)
+ .add("gkeConnectivityType", gkeConnectivityType)
+ .toString();
+ }
+}
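
A sketch of populating the new (and already deprecated) `GcpCommonNetworkConfig`; the connectivity type comes from the `GkeConfigConnectivityType` enum referenced above, and the `/28` master range is a placeholder:

```java
import com.databricks.sdk.service.provisioning.GcpCommonNetworkConfig;
import com.databricks.sdk.service.provisioning.GkeConfigConnectivityType;

public class GcpCommonNetworkConfigExample {
  public static void main(String[] args) {
    // Deprecated per its own javadoc (use GkeConfig instead); shown only to
    // illustrate the fluent setters.
    GcpCommonNetworkConfig config =
        new GcpCommonNetworkConfig()
            .setGkeConnectivityType(GkeConfigConnectivityType.PRIVATE_NODE_PUBLIC_MASTER)
            .setGkeClusterMasterIpRange("10.3.0.0/28");
    System.out.println(config);
  }
}
```
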
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpKeyInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpKeyInfo.java
index be5eba082..2a6b12355 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpKeyInfo.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpKeyInfo.java
@@ -9,7 +9,10 @@
@Generated
public class GcpKeyInfo {
- /** The GCP KMS key's resource name */
+ /**
+ * Globally unique KMS key resource ID of the form
+ * projects/testProjectId/locations/us-east4/keyRings/gcpCmkKeyRing/cryptoKeys/cmk-eastus4
+ */
@JsonProperty("kms_key_id")
private String kmsKeyId;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpManagedNetworkConfig.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpManagedNetworkConfig.java
index e27c531f4..d6e9e9efd 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpManagedNetworkConfig.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpManagedNetworkConfig.java
@@ -7,46 +7,20 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
-/**
- * The network settings for the workspace. The configurations are only for Databricks-managed VPCs.
- * It is ignored if you specify a customer-managed VPC in the `network_id` field.", All the IP range
- * configurations must be mutually exclusive. An attempt to create a workspace fails if Databricks
- * detects an IP range overlap.
- *
- * Specify custom IP ranges in CIDR format. The IP ranges for these fields must not overlap, and
- * all IP addresses must be entirely within the following ranges: `10.0.0.0/8`, `100.64.0.0/10`,
- * `172.16.0.0/12`, `192.168.0.0/16`, and `240.0.0.0/4`.
- *
- * The sizes of these IP ranges affect the maximum number of nodes for the workspace.
- *
- * **Important**: Confirm the IP ranges used by your Databricks workspace before creating the
- * workspace. You cannot change them after your workspace is deployed. If the IP address ranges for
- * your Databricks are too small, IP exhaustion can occur, causing your Databricks jobs to fail. To
- * determine the address range sizes that you need, Databricks provides a calculator as a Microsoft
- * Excel spreadsheet. See [calculate subnet sizes for a new workspace].
- *
- * [calculate subnet sizes for a new workspace]:
- * https://docs.gcp.databricks.com/administration-guide/cloud-configurations/gcp/network-sizing.html
- */
+/** The network configuration for the workspace. */
@Generated
public class GcpManagedNetworkConfig {
- /**
- * The IP range from which to allocate GKE cluster pods. No bigger than `/9` and no smaller than
- * `/21`.
- */
+ /** The IP range that will be used to allocate GKE cluster Pods from. */
@JsonProperty("gke_cluster_pod_ip_range")
private String gkeClusterPodIpRange;
- /**
- * The IP range from which to allocate GKE cluster services. No bigger than `/16` and no smaller
- * than `/27`.
- */
+ /** The IP range that will be used to allocate GKE cluster Services from. */
@JsonProperty("gke_cluster_service_ip_range")
private String gkeClusterServiceIpRange;
/**
- * The IP range from which to allocate GKE cluster nodes. No bigger than `/9` and no smaller than
- * `/29`.
+ * The IP range which will be used to allocate GKE cluster nodes from. Note: Pods, services and
+ * master IP range must be mutually exclusive.
*/
@JsonProperty("subnet_cidr")
private String subnetCidr;
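
The reworded field docs drop the explicit size bounds but keep the mutual-exclusivity requirement. A sketch with placeholder CIDRs chosen so the node, pod, and service ranges do not overlap; the setter names follow the SDK's standard fluent pattern (they are not shown in this hunk):

```java
import com.databricks.sdk.service.provisioning.GcpManagedNetworkConfig;

public class GcpManagedNetworkConfigExample {
  public static void main(String[] args) {
    // Pod, service, and node ranges (plus the GKE master range) must be
    // mutually exclusive; these example CIDRs are placeholders.
    GcpManagedNetworkConfig network =
        new GcpManagedNetworkConfig()
            .setSubnetCidr("10.0.0.0/20")
            .setGkeClusterPodIpRange("10.1.0.0/16")
            .setGkeClusterServiceIpRange("10.2.0.0/20");
    System.out.println(network);
  }
}
```
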
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpNetworkInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpNetworkInfo.java
index 08f497789..ee4f4e301 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpNetworkInfo.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpNetworkInfo.java
@@ -7,42 +7,35 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
-/**
- * The Google Cloud specific information for this network (for example, the VPC ID, subnet ID, and
- * secondary IP ranges).
- */
@Generated
public class GcpNetworkInfo {
- /** The Google Cloud project ID of the VPC network. */
+ /** The GCP project ID for network resources. This project is where the VPC and subnet reside. */
@JsonProperty("network_project_id")
private String networkProjectId;
/**
- * The name of the secondary IP range for pods. A Databricks-managed GKE cluster uses this IP
- * range for its pods. This secondary IP range can be used by only one workspace.
+ * Name of the secondary range within the subnet that will be used by GKE as the Pod IP range.
+ * This is BYO VPC specific. DB VPC uses network.getGcpManagedNetworkConfig.getGkeClusterPodIpRange.
*/
@JsonProperty("pod_ip_range_name")
private String podIpRangeName;
- /**
- * The name of the secondary IP range for services. A Databricks-managed GKE cluster uses this IP
- * range for its services. This secondary IP range can be used by only one workspace.
- */
+ /** Name of the secondary range within the subnet that will be used by GKE as the Service IP range. */
@JsonProperty("service_ip_range_name")
private String serviceIpRangeName;
- /** The ID of the subnet associated with this network. */
+ /**
+ * The customer-provided Subnet ID that will be available to Clusters in Workspaces using this
+ * Network.
+ */
@JsonProperty("subnet_id")
private String subnetId;
- /** The Google Cloud region of the workspace data plane (for example, `us-east4`). */
+ /** */
@JsonProperty("subnet_region")
private String subnetRegion;
- /**
- * The ID of the VPC associated with this network. VPC IDs can be used in multiple network
- * configurations.
- */
+ /** The customer-provided VPC ID. */
@JsonProperty("vpc_id")
private String vpcId;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpVpcEndpointInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpVpcEndpointInfo.java
index 3298b72bf..2cbbbbec5 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpVpcEndpointInfo.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GcpVpcEndpointInfo.java
@@ -7,26 +7,25 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
-/** The Google Cloud specific information for this Private Service Connect endpoint. */
@Generated
public class GcpVpcEndpointInfo {
- /** Region of the PSC endpoint. */
+ /** */
@JsonProperty("endpoint_region")
private String endpointRegion;
- /** The Google Cloud project ID of the VPC network where the PSC connection resides. */
+ /** */
@JsonProperty("project_id")
private String projectId;
- /** The unique ID of this PSC connection. */
+ /** */
@JsonProperty("psc_connection_id")
private String pscConnectionId;
- /** The name of the PSC endpoint in the Google Cloud project. */
+ /** */
@JsonProperty("psc_endpoint_name")
private String pscEndpointName;
- /** The service attachment this PSC connection connects to. */
+ /** */
@JsonProperty("service_attachment_id")
private String serviceAttachmentId;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetCredentialRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetCredentialRequest.java
index f4b3add8a..0d95a3d55 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetCredentialRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetCredentialRequest.java
@@ -9,7 +9,7 @@
@Generated
public class GetCredentialRequest {
- /** Databricks Account API credential configuration ID */
+ /** Credential configuration ID */
@JsonIgnore private String credentialsId;
public GetCredentialRequest setCredentialsId(String credentialsId) {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetPrivateAccesRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetPrivateAccesRequest.java
index 563d2ea88..afe93b072 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetPrivateAccesRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetPrivateAccesRequest.java
@@ -9,7 +9,7 @@
@Generated
public class GetPrivateAccesRequest {
- /** Databricks Account API private access settings ID. */
+ /** */
@JsonIgnore private String privateAccessSettingsId;
public GetPrivateAccesRequest setPrivateAccessSettingsId(String privateAccessSettingsId) {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetStorageRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetStorageRequest.java
index f9a99b30a..15bae7e12 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetStorageRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetStorageRequest.java
@@ -9,7 +9,7 @@
@Generated
public class GetStorageRequest {
- /** Databricks Account API storage configuration ID. */
+ /** */
@JsonIgnore private String storageConfigurationId;
public GetStorageRequest setStorageConfigurationId(String storageConfigurationId) {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetWorkspaceRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetWorkspaceRequest.java
index 7b8b7bb44..30ece2cca 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetWorkspaceRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GetWorkspaceRequest.java
@@ -9,7 +9,7 @@
@Generated
public class GetWorkspaceRequest {
- /** Workspace ID. */
+ /** */
@JsonIgnore private Long workspaceId;
public GetWorkspaceRequest setWorkspaceId(Long workspaceId) {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GkeConfig.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GkeConfig.java
index 064319e4f..4435e9994 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GkeConfig.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/GkeConfig.java
@@ -7,26 +7,16 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
-/** The configurations for the GKE cluster of a Databricks workspace. */
+/** The configurations of the GKE cluster used by the GCP workspace. */
@Generated
public class GkeConfig {
- /**
- * Specifies the network connectivity types for the GKE nodes and the GKE master network.
- *
- * Set to `PRIVATE_NODE_PUBLIC_MASTER` for a private GKE cluster for the workspace. The GKE
- * nodes will not have public IPs.
- *
- * Set to `PUBLIC_NODE_PUBLIC_MASTER` for a public GKE cluster. The nodes of a public GKE
- * cluster have public IP addresses.
- */
+ /** The type of network connectivity of the GKE cluster. */
@JsonProperty("connectivity_type")
private GkeConfigConnectivityType connectivityType;
/**
- * The IP range from which to allocate GKE cluster master resources. This field will be ignored if
- * GKE private cluster is not enabled.
- *
- * It must be exactly as big as `/28`.
+ * The IP range that will be used to allocate GKE cluster master resources from. This field must
+ * not be set if gke_cluster_type=PUBLIC_NODE_PUBLIC_MASTER.
*/
@JsonProperty("master_ip_range")
private String masterIpRange;
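
A sketch of the two `GkeConfig` shapes implied by the new `master_ip_range` doc: a master range is supplied for a private cluster, and omitted when the connectivity type is `PUBLIC_NODE_PUBLIC_MASTER`. The CIDR is a placeholder:

```java
import com.databricks.sdk.service.provisioning.GkeConfig;
import com.databricks.sdk.service.provisioning.GkeConfigConnectivityType;

public class GkeConfigExample {
  public static void main(String[] args) {
    // Private cluster: the master IP range must be provided.
    GkeConfig privateCluster =
        new GkeConfig()
            .setConnectivityType(GkeConfigConnectivityType.PRIVATE_NODE_PUBLIC_MASTER)
            .setMasterIpRange("10.3.0.0/28");

    // Public cluster: per the new javadoc, master_ip_range must not be set.
    GkeConfig publicCluster =
        new GkeConfig()
            .setConnectivityType(GkeConfigConnectivityType.PUBLIC_NODE_PUBLIC_MASTER);

    System.out.println(privateCluster);
    System.out.println(publicCluster);
  }
}
```
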
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/KeyAccessConfiguration.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/KeyAccessConfiguration.java
new file mode 100755
index 000000000..14dc163c3
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/KeyAccessConfiguration.java
@@ -0,0 +1,45 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.provisioning;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+/** The credential ID that is used to access the key vault. */
+@Generated
+public class KeyAccessConfiguration {
+ /** */
+ @JsonProperty("credential_id")
+ private String credentialId;
+
+ public KeyAccessConfiguration setCredentialId(String credentialId) {
+ this.credentialId = credentialId;
+ return this;
+ }
+
+ public String getCredentialId() {
+ return credentialId;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ KeyAccessConfiguration that = (KeyAccessConfiguration) o;
+ return Objects.equals(credentialId, that.credentialId);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(credentialId);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(KeyAccessConfiguration.class)
+ .add("credentialId", credentialId)
+ .toString();
+ }
+}
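
A trivial usage sketch of the new `KeyAccessConfiguration` wrapper; the credential ID is a placeholder for the credential used to access the key vault:

```java
import com.databricks.sdk.service.provisioning.KeyAccessConfiguration;

public class KeyAccessConfigurationExample {
  public static void main(String[] args) {
    // Wraps only the credential ID used to reach the key vault.
    KeyAccessConfiguration access =
        new KeyAccessConfiguration().setCredentialId("cred-0000-placeholder");
    System.out.println(access.getCredentialId());
  }
}
```
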
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/KeyUseCase.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/KeyUseCase.java
index 1c019242d..308014cf0 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/KeyUseCase.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/KeyUseCase.java
@@ -4,15 +4,8 @@
import com.databricks.sdk.support.Generated;
-/**
- * Possible values are: * `MANAGED_SERVICES`: Encrypts notebook and secret data in the control plane
- * * `STORAGE`: Encrypts the workspace's root S3 bucket (root DBFS and system data) and, optionally,
- * cluster EBS volumes.
- */
@Generated
public enum KeyUseCase {
- MANAGED_SERVICES, // Encrypts notebook and secret data in the control plane
- STORAGE, // Encrypts the workspace's root S3 bucket (root DBFS and system data) and,
- // optionally, cluster EBS volumes.
-
+ MANAGED_SERVICES,
+ STORAGE,
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/Network.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/Network.java
index b149a4d48..39be0950e 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/Network.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/Network.java
@@ -34,11 +34,17 @@ public class Network {
@JsonProperty("network_name")
private String networkName;
- /** */
+ /**
+ * IDs of one to five security groups associated with this network. Security group IDs **cannot**
+ * be used in multiple network configurations.
+ */
@JsonProperty("security_group_ids")
private Collection<String> securityGroupIds;
- /** */
+ /**
+ * IDs of at least two subnets associated with this network. Subnet IDs **cannot** be used in
+ * multiple network configurations.
+ */
@JsonProperty("subnet_ids")
private Collection<String> subnetIds;
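
The restored field docs encode concrete constraints: one to five security groups, at least two subnets, and none of these IDs may be reused by another network configuration. A sketch with placeholder IDs (fluent setters assumed per the SDK pattern):

```java
import com.databricks.sdk.service.provisioning.Network;
import java.util.Arrays;

public class NetworkExample {
  public static void main(String[] args) {
    // One security group (within the 1-5 limit) and the minimum two subnets;
    // all IDs below are placeholders.
    Network network =
        new Network()
            .setNetworkName("example-network")
            .setSecurityGroupIds(Arrays.asList("sg-0123456789abcdef0"))
            .setSubnetIds(Arrays.asList("subnet-aaaa1111", "subnet-bbbb2222"));
    System.out.println(network);
  }
}
```
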
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/NetworkVpcEndpoints.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/NetworkVpcEndpoints.java
index 962a872a2..e227d5bfb 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/NetworkVpcEndpoints.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/NetworkVpcEndpoints.java
@@ -8,12 +8,6 @@
import java.util.Collection;
import java.util.Objects;
-/**
- * If specified, contains the VPC endpoints used to allow cluster communication from this VPC over
- * [AWS PrivateLink].
- *
- * [AWS PrivateLink]: https://aws.amazon.com/privatelink/
- */
@Generated
public class NetworkVpcEndpoints {
/**
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/NetworksAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/NetworksAPI.java
index 02cc7a527..628802833 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/NetworksAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/NetworksAPI.java
@@ -34,8 +34,8 @@ public Network create(CreateNetworkRequest request) {
return impl.create(request);
}
- public void delete(String networkId) {
- delete(new DeleteNetworkRequest().setNetworkId(networkId));
+ public Network delete(String networkId) {
+ return delete(new DeleteNetworkRequest().setNetworkId(networkId));
}
/**
@@ -44,8 +44,8 @@ public void delete(String networkId) {
*
* This operation is available only if your account is on the E2 version of the platform.
*/
- public void delete(DeleteNetworkRequest request) {
- impl.delete(request);
+ public Network delete(DeleteNetworkRequest request) {
+ return impl.delete(request);
}
public Network get(String networkId) {
@@ -57,11 +57,7 @@ public Network get(GetNetworkRequest request) {
return impl.get(request);
}
- /**
- * Gets a list of all Databricks network configurations for an account, specified by ID.
- *
- * This operation is available only if your account is on the E2 version of the platform.
- */
+ /** Lists Databricks network configurations for an account. */
public Iterable<Network> list() {
return impl.list();
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/NetworksImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/NetworksImpl.java
index 5a6c8d710..524e596ac 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/NetworksImpl.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/NetworksImpl.java
@@ -32,7 +32,7 @@ public Network create(CreateNetworkRequest request) {
}
@Override
- public void delete(DeleteNetworkRequest request) {
+ public Network delete(DeleteNetworkRequest request) {
String path =
String.format(
"/api/2.0/accounts/%s/networks/%s",
@@ -41,7 +41,7 @@ public void delete(DeleteNetworkRequest request) {
Request req = new Request("DELETE", path);
ApiClient.setQuery(req, request);
req.withHeader("Accept", "application/json");
- apiClient.execute(req, Void.class);
+ return apiClient.execute(req, Network.class);
} catch (IOException e) {
throw new DatabricksException("IO error: " + e.getMessage(), e);
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/NetworksService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/NetworksService.java
index db13e6997..092c7b5bb 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/NetworksService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/NetworksService.java
@@ -26,15 +26,11 @@ public interface NetworksService {
*
* This operation is available only if your account is on the E2 version of the platform.
*/
- void delete(DeleteNetworkRequest deleteNetworkRequest);
+ Network delete(DeleteNetworkRequest deleteNetworkRequest);
/** Gets a Databricks network configuration, which represents a cloud VPC and its resources. */
Network get(GetNetworkRequest getNetworkRequest);
- /**
- * Gets a list of all Databricks network configurations for an account, specified by ID.
- *
- * This operation is available only if your account is on the E2 version of the platform.
- */
+ /** Lists Databricks network configurations for an account. */
Collection<Network> list();
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PricingTier.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PricingTier.java
index cdbdc6e5a..b3879eb41 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PricingTier.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PricingTier.java
@@ -4,11 +4,6 @@
import com.databricks.sdk.support.Generated;
-/**
- * The pricing tier of the workspace. For pricing tier information, see [AWS Pricing].
- *
- * [AWS Pricing]: https://databricks.com/product/aws-pricing
- */
@Generated
public enum PricingTier {
COMMUNITY_EDITION,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessAPI.java
index 82b70e7a6..644d702be 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessAPI.java
@@ -24,87 +24,53 @@ public PrivateAccessAPI(PrivateAccessService mock) {
}
/**
- * Creates a private access settings object, which specifies how your workspace is accessed over
- * [AWS PrivateLink]. To use AWS PrivateLink, a workspace must have a private access settings
- * object referenced by ID in the workspace's `private_access_settings_id` property.
- *
- * You can share one private access settings with multiple workspaces in a single account.
- * However, private access settings are specific to AWS regions, so only workspaces in the same
- * AWS region can use a given private access settings object.
- *
- * Before configuring PrivateLink, read the [Databricks article about PrivateLink].
- *
- * [AWS PrivateLink]: https://aws.amazon.com/privatelink [Databricks article about
- * PrivateLink]:
- * https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
+ * Creates a private access settings configuration, which represents network access restrictions
+ * for workspace resources. Private access settings configure whether workspaces can be accessed
+ * from the public internet or only from private endpoints.
*/
public PrivateAccessSettings create(CreatePrivateAccessSettingsRequest request) {
return impl.create(request);
}
- public void delete(String privateAccessSettingsId) {
- delete(new DeletePrivateAccesRequest().setPrivateAccessSettingsId(privateAccessSettingsId));
+ public PrivateAccessSettings delete(String privateAccessSettingsId) {
+ return delete(
+ new DeletePrivateAccesRequest().setPrivateAccessSettingsId(privateAccessSettingsId));
}
- /**
- * Deletes a private access settings object, which determines how your workspace is accessed over
- * [AWS PrivateLink].
- *
- * Before configuring PrivateLink, read the [Databricks article about PrivateLink].",
- *
- * [AWS PrivateLink]: https://aws.amazon.com/privatelink [Databricks article about
- * PrivateLink]:
- * https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
- */
- public void delete(DeletePrivateAccesRequest request) {
- impl.delete(request);
+ /** Deletes a Databricks private access settings configuration, both specified by ID. */
+ public PrivateAccessSettings delete(DeletePrivateAccesRequest request) {
+ return impl.delete(request);
}
public PrivateAccessSettings get(String privateAccessSettingsId) {
return get(new GetPrivateAccesRequest().setPrivateAccessSettingsId(privateAccessSettingsId));
}
- /**
- * Gets a private access settings object, which specifies how your workspace is accessed over [AWS
- * PrivateLink].
- *
- * Before configuring PrivateLink, read the [Databricks article about PrivateLink].",
- *
- * [AWS PrivateLink]: https://aws.amazon.com/privatelink [Databricks article about
- * PrivateLink]:
- * https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
- */
+ /** Gets a Databricks private access settings configuration, both specified by ID. */
public PrivateAccessSettings get(GetPrivateAccesRequest request) {
return impl.get(request);
}
- /** Gets a list of all private access settings objects for an account, specified by ID. */
+ /** Lists Databricks private access settings for an account. */
public Iterable<PrivateAccessSettings> list() {
return impl.list();
}
/**
* Updates an existing private access settings object, which specifies how your workspace is
- * accessed over [AWS PrivateLink]. To use AWS PrivateLink, a workspace must have a private access
- * settings object referenced by ID in the workspace's `private_access_settings_id` property.
- *
- * This operation completely overwrites your existing private access settings object attached
- * to your workspaces. All workspaces attached to the private access settings are affected by any
- * change. If `public_access_enabled`, `private_access_level`, or `allowed_vpc_endpoint_ids` are
- * updated, effects of these changes might take several minutes to propagate to the workspace API.
- *
- * You can share one private access settings object with multiple workspaces in a single
- * account. However, private access settings are specific to AWS regions, so only workspaces in
- * the same AWS region can use a given private access settings object.
- *
- * Before configuring PrivateLink, read the [Databricks article about PrivateLink].
- *
- * [AWS PrivateLink]: https://aws.amazon.com/privatelink [Databricks article about
- * PrivateLink]:
- * https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
+ * accessed over AWS PrivateLink. To use AWS PrivateLink, a workspace must have a private access
+ * settings object referenced by ID in the workspace's private_access_settings_id property. This
+ * operation completely overwrites your existing private access settings object attached to your
+ * workspaces. All workspaces attached to the private access settings are affected by any change.
+ * If public_access_enabled, private_access_level, or allowed_vpc_endpoint_ids are updated,
+ * effects of these changes might take several minutes to propagate to the workspace API. You can
+ * share one private access settings object with multiple workspaces in a single account. However,
+ * private access settings are specific to AWS regions, so only workspaces in the same AWS region
+ * can use a given private access settings object. Before configuring PrivateLink, read the
+ * Databricks article about PrivateLink.
*/
- public void replace(ReplacePrivateAccessSettingsRequest request) {
- impl.replace(request);
+ public PrivateAccessSettings replace(ReplacePrivateAccessSettingsRequest request) {
+ return impl.replace(request);
}
public PrivateAccessService impl() {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessImpl.java
index dcca1eeba..1ed77901e 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessImpl.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessImpl.java
@@ -34,7 +34,7 @@ public PrivateAccessSettings create(CreatePrivateAccessSettingsRequest request)
}
@Override
- public void delete(DeletePrivateAccesRequest request) {
+ public PrivateAccessSettings delete(DeletePrivateAccesRequest request) {
String path =
String.format(
"/api/2.0/accounts/%s/private-access-settings/%s",
@@ -43,7 +43,7 @@ public void delete(DeletePrivateAccesRequest request) {
Request req = new Request("DELETE", path);
ApiClient.setQuery(req, request);
req.withHeader("Accept", "application/json");
- apiClient.execute(req, Void.class);
+ return apiClient.execute(req, PrivateAccessSettings.class);
} catch (IOException e) {
throw new DatabricksException("IO error: " + e.getMessage(), e);
}
@@ -76,17 +76,19 @@ public Collection<PrivateAccessSettings> list() {
}
@Override
- public void replace(ReplacePrivateAccessSettingsRequest request) {
+ public PrivateAccessSettings replace(ReplacePrivateAccessSettingsRequest request) {
String path =
String.format(
"/api/2.0/accounts/%s/private-access-settings/%s",
apiClient.configuredAccountID(), request.getPrivateAccessSettingsId());
try {
- Request req = new Request("PUT", path, apiClient.serialize(request));
+ Request req =
+ new Request(
+ "PUT", path, apiClient.serialize(request.getCustomerFacingPrivateAccessSettings()));
ApiClient.setQuery(req, request);
req.withHeader("Accept", "application/json");
req.withHeader("Content-Type", "application/json");
- apiClient.execute(req, Void.class);
+ return apiClient.execute(req, PrivateAccessSettings.class);
} catch (IOException e) {
throw new DatabricksException("IO error: " + e.getMessage(), e);
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessLevel.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessLevel.java
index eb3dba693..73b5dd3f1 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessLevel.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessLevel.java
@@ -4,13 +4,6 @@
import com.databricks.sdk.support.Generated;
-/**
- * The private access level controls which VPC endpoints can connect to the UI or API of any
- * workspace that attaches this private access settings object. * `ACCOUNT` level access (the
- * default) allows only VPC endpoints that are registered in your Databricks account connect to your
- * workspace. * `ENDPOINT` level access allows only specified VPC endpoints connect to your
- * workspace. For details, see `allowed_vpc_endpoint_ids`.
- */
@Generated
public enum PrivateAccessLevel {
ACCOUNT,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessService.java
index 1b3d54252..66137804a 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessService.java
@@ -14,69 +14,35 @@
@Generated
public interface PrivateAccessService {
/**
- * Creates a private access settings object, which specifies how your workspace is accessed over
- * [AWS PrivateLink]. To use AWS PrivateLink, a workspace must have a private access settings
- * object referenced by ID in the workspace's `private_access_settings_id` property.
- *
- * You can share one private access settings with multiple workspaces in a single account.
- * However, private access settings are specific to AWS regions, so only workspaces in the same
- * AWS region can use a given private access settings object.
- *
- * Before configuring PrivateLink, read the [Databricks article about PrivateLink].
- *
- * [AWS PrivateLink]: https://aws.amazon.com/privatelink [Databricks article about
- * PrivateLink]:
- * https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
+ * Creates a private access settings configuration, which represents network access restrictions
+ * for workspace resources. Private access settings configure whether workspaces can be accessed
+ * from the public internet or only from private endpoints.
*/
PrivateAccessSettings create(
CreatePrivateAccessSettingsRequest createPrivateAccessSettingsRequest);
- /**
- * Deletes a private access settings object, which determines how your workspace is accessed over
- * [AWS PrivateLink].
- *
- * Before configuring PrivateLink, read the [Databricks article about PrivateLink].",
- *
- * [AWS PrivateLink]: https://aws.amazon.com/privatelink [Databricks article about
- * PrivateLink]:
- * https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
- */
- void delete(DeletePrivateAccesRequest deletePrivateAccesRequest);
+ /** Deletes a Databricks private access settings configuration, both specified by ID. */
+ PrivateAccessSettings delete(DeletePrivateAccesRequest deletePrivateAccesRequest);
- /**
- * Gets a private access settings object, which specifies how your workspace is accessed over [AWS
- * PrivateLink].
- *
- * Before configuring PrivateLink, read the [Databricks article about PrivateLink].",
- *
- * [AWS PrivateLink]: https://aws.amazon.com/privatelink [Databricks article about
- * PrivateLink]:
- * https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
- */
+ /** Gets a Databricks private access settings configuration, both specified by ID. */
PrivateAccessSettings get(GetPrivateAccesRequest getPrivateAccesRequest);
- /** Gets a list of all private access settings objects for an account, specified by ID. */
+ /** Lists Databricks private access settings for an account. */
Collection<PrivateAccessSettings> list();
/**
* Updates an existing private access settings object, which specifies how your workspace is
- * accessed over [AWS PrivateLink]. To use AWS PrivateLink, a workspace must have a private access
- * settings object referenced by ID in the workspace's `private_access_settings_id` property.
- *
- * This operation completely overwrites your existing private access settings object attached
- * to your workspaces. All workspaces attached to the private access settings are affected by any
- * change. If `public_access_enabled`, `private_access_level`, or `allowed_vpc_endpoint_ids` are
- * updated, effects of these changes might take several minutes to propagate to the workspace API.
- *
- * You can share one private access settings object with multiple workspaces in a single
- * account. However, private access settings are specific to AWS regions, so only workspaces in
- * the same AWS region can use a given private access settings object.
- *
- * Before configuring PrivateLink, read the [Databricks article about PrivateLink].
- *
- * [AWS PrivateLink]: https://aws.amazon.com/privatelink [Databricks article about
- * PrivateLink]:
- * https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
+ * accessed over AWS PrivateLink. To use AWS PrivateLink, a workspace must have a private access
+ * settings object referenced by ID in the workspace's private_access_settings_id property. This
+ * operation completely overwrites your existing private access settings object attached to your
+ * workspaces. All workspaces attached to the private access settings are affected by any change.
+ * If public_access_enabled, private_access_level, or allowed_vpc_endpoint_ids are updated,
+ * effects of these changes might take several minutes to propagate to the workspace API. You can
+ * share one private access settings object with multiple workspaces in a single account. However,
+ * private access settings are specific to AWS regions, so only workspaces in the same AWS region
+ * can use a given private access settings object. Before configuring PrivateLink, read the
+ * Databricks article about PrivateLink.
*/
- void replace(ReplacePrivateAccessSettingsRequest replacePrivateAccessSettingsRequest);
+ PrivateAccessSettings replace(
+ ReplacePrivateAccessSettingsRequest replacePrivateAccessSettingsRequest);
}
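
`replace` now returns the resulting `PrivateAccessSettings`, and per the `PrivateAccessImpl` change above, the request body is serialized from the request's nested settings object rather than from the request itself. A sketch assuming account-level auth; the ID is a placeholder, and the nested `CustomerFacingPrivateAccessSettings` payload (whose type is not detailed in this diff) would be populated on the request first:

```java
import com.databricks.sdk.AccountClient;
import com.databricks.sdk.service.provisioning.PrivateAccessSettings;
import com.databricks.sdk.service.provisioning.ReplacePrivateAccessSettingsRequest;

public class ReplacePrivateAccessExample {
  public static void main(String[] args) {
    AccountClient account = new AccountClient();

    // In practice, set the nested settings object on the request here; it is
    // what the impl serializes as the PUT body.
    ReplacePrivateAccessSettingsRequest request =
        new ReplacePrivateAccessSettingsRequest()
            .setPrivateAccessSettingsId("pas-0000-placeholder");

    // replace(...) now returns the updated settings instead of void.
    PrivateAccessSettings updated = account.privateAccess().replace(request);
    System.out.println("Public access enabled: " + updated.getPublicAccessEnabled());
  }
}
```
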
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessSettings.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessSettings.java
index 03c466d78..a36f8b90b 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessSettings.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/PrivateAccessSettings.java
@@ -8,17 +8,32 @@
import java.util.Collection;
import java.util.Objects;
+/** * */
@Generated
public class PrivateAccessSettings {
- /** The Databricks account ID that hosts the credential. */
+ /** The Databricks account ID that hosts the private access settings. */
@JsonProperty("account_id")
private String accountId;
- /** An array of Databricks VPC endpoint IDs. */
+ /**
+ * An array of Databricks VPC endpoint IDs. This is the Databricks ID that is returned when
+ * registering the VPC endpoint configuration in your Databricks account. This is not the ID of
+ * the VPC endpoint in AWS. Only used when private_access_level is set to ENDPOINT. This is an
+ * allow list of VPC endpoints in your account that can connect to your workspace over AWS
+ * PrivateLink. If hybrid access to your workspace is enabled by setting public_access_enabled to
+ * true, this control only works for PrivateLink connections. To control how your workspace is
+ * accessed via public internet, see IP access lists.
+ */
@JsonProperty("allowed_vpc_endpoint_ids")
private Collection<String> allowedVpcEndpointIds;
- /** */
+ /**
+ * The private access level controls which VPC endpoints can connect to the UI or API of any
+ * workspace that attaches this private access settings object. `ACCOUNT` level access (the
+ * default) allows only VPC endpoints that are registered in your Databricks account to connect
+ * to your workspace. `ENDPOINT` level access allows only specified VPC endpoints to connect to
+ * your workspace. For details, see allowed_vpc_endpoint_ids.
+ */
@JsonProperty("private_access_level")
private PrivateAccessLevel privateAccessLevel;
@@ -32,14 +47,13 @@ public class PrivateAccessSettings {
/**
* Determines if the workspace can be accessed over public internet. For fully private workspaces,
- * you can optionally specify `false`, but only if you implement both the front-end and the
- * back-end PrivateLink connections. Otherwise, specify `true`, which means that public access is
- * enabled.
+ * you can optionally specify false, but only if you implement both the front-end and the back-end
+ * PrivateLink connections. Otherwise, specify true, which means that public access is enabled.
*/
@JsonProperty("public_access_enabled")
private Boolean publicAccessEnabled;
- /** The cloud region for workspaces attached to this private access settings object. */
+ /** The AWS region for workspaces attached to this private access settings object. */
@JsonProperty("region")
private String region;
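
As a concrete, hedged reading of the ENDPOINT-level semantics above, this fragment builds a settings object that limits connectivity to two registered Databricks VPC endpoint IDs; setter names are inferred from the fields shown in this class and the IDs are placeholders:

    import com.databricks.sdk.service.provisioning.PrivateAccessLevel;
    import com.databricks.sdk.service.provisioning.PrivateAccessSettings;
    import java.util.Arrays;

    public class EndpointLevelSketch {
      public static void main(String[] args) {
        // ENDPOINT-level access: only the listed Databricks VPC endpoint
        // registrations (placeholder IDs below) may reach the workspace.
        PrivateAccessSettings endpointScoped =
            new PrivateAccessSettings()
                .setPrivateAccessLevel(PrivateAccessLevel.ENDPOINT)
                .setAllowedVpcEndpointIds(Arrays.asList("vpce-reg-111", "vpce-reg-222"))
                .setPublicAccessEnabled(false)
                .setRegion("us-east-1");
        System.out.println(endpointScoped);
      }
    }
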
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/ReplacePrivateAccessSettingsRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/ReplacePrivateAccessSettingsRequest.java
index 4d62abff4..643a96014 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/ReplacePrivateAccessSettingsRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/ReplacePrivateAccessSettingsRequest.java
@@ -6,70 +6,25 @@
import com.databricks.sdk.support.ToStringer;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
-import java.util.Collection;
import java.util.Objects;
@Generated
public class ReplacePrivateAccessSettingsRequest {
- /**
- * An array of Databricks VPC endpoint IDs. This is the Databricks ID that is returned when
- * registering the VPC endpoint configuration in your Databricks account. This is not the ID of
- * the VPC endpoint in AWS.
- *
- * <p>Only used when `private_access_level` is set to `ENDPOINT`. This is an allow list of VPC
- * endpoints that in your account that can connect to your workspace over AWS PrivateLink.
- *
- * <p>If hybrid access to your workspace is enabled by setting `public_access_enabled` to `true`,
- * this control only works for PrivateLink connections. To control how your workspace is accessed
- * via public internet, see [IP access lists].
- *
- * <p>[IP access lists]: https://docs.databricks.com/security/network/ip-access-list.html
- */
- @JsonProperty("allowed_vpc_endpoint_ids")
- private Collection<String> allowedVpcEndpointIds;
+ /** Properties of the new private access settings object. */
+ @JsonProperty("customer_facing_private_access_settings")
+ private PrivateAccessSettings customerFacingPrivateAccessSettings;
- /** */
- @JsonProperty("private_access_level")
- private PrivateAccessLevel privateAccessLevel;
-
- /** Databricks Account API private access settings ID. */
+ /** Databricks private access settings ID. */
@JsonIgnore private String privateAccessSettingsId;
- /** The human-readable name of the private access settings object. */
- @JsonProperty("private_access_settings_name")
- private String privateAccessSettingsName;
-
- /**
- * Determines if the workspace can be accessed over public internet. For fully private workspaces,
- * you can optionally specify `false`, but only if you implement both the front-end and the
- * back-end PrivateLink connections. Otherwise, specify `true`, which means that public access is
- * enabled.
- */
- @JsonProperty("public_access_enabled")
- private Boolean publicAccessEnabled;
-
- /** The cloud region for workspaces associated with this private access settings object. */
- @JsonProperty("region")
- private String region;
-
- public ReplacePrivateAccessSettingsRequest setAllowedVpcEndpointIds(
- Collection<String> allowedVpcEndpointIds) {
- this.allowedVpcEndpointIds = allowedVpcEndpointIds;
- return this;
- }
-
- public Collection<String> getAllowedVpcEndpointIds() {
- return allowedVpcEndpointIds;
- }
-
- public ReplacePrivateAccessSettingsRequest setPrivateAccessLevel(
- PrivateAccessLevel privateAccessLevel) {
- this.privateAccessLevel = privateAccessLevel;
+ public ReplacePrivateAccessSettingsRequest setCustomerFacingPrivateAccessSettings(
+ PrivateAccessSettings customerFacingPrivateAccessSettings) {
+ this.customerFacingPrivateAccessSettings = customerFacingPrivateAccessSettings;
return this;
}
- public PrivateAccessLevel getPrivateAccessLevel() {
- return privateAccessLevel;
+ public PrivateAccessSettings getCustomerFacingPrivateAccessSettings() {
+ return customerFacingPrivateAccessSettings;
}
public ReplacePrivateAccessSettingsRequest setPrivateAccessSettingsId(
@@ -82,67 +37,26 @@ public String getPrivateAccessSettingsId() {
return privateAccessSettingsId;
}
- public ReplacePrivateAccessSettingsRequest setPrivateAccessSettingsName(
- String privateAccessSettingsName) {
- this.privateAccessSettingsName = privateAccessSettingsName;
- return this;
- }
-
- public String getPrivateAccessSettingsName() {
- return privateAccessSettingsName;
- }
-
- public ReplacePrivateAccessSettingsRequest setPublicAccessEnabled(Boolean publicAccessEnabled) {
- this.publicAccessEnabled = publicAccessEnabled;
- return this;
- }
-
- public Boolean getPublicAccessEnabled() {
- return publicAccessEnabled;
- }
-
- public ReplacePrivateAccessSettingsRequest setRegion(String region) {
- this.region = region;
- return this;
- }
-
- public String getRegion() {
- return region;
- }
-
@Override
public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ReplacePrivateAccessSettingsRequest that = (ReplacePrivateAccessSettingsRequest) o;
- return Objects.equals(allowedVpcEndpointIds, that.allowedVpcEndpointIds)
- && Objects.equals(privateAccessLevel, that.privateAccessLevel)
- && Objects.equals(privateAccessSettingsId, that.privateAccessSettingsId)
- && Objects.equals(privateAccessSettingsName, that.privateAccessSettingsName)
- && Objects.equals(publicAccessEnabled, that.publicAccessEnabled)
- && Objects.equals(region, that.region);
+ return Objects.equals(
+ customerFacingPrivateAccessSettings, that.customerFacingPrivateAccessSettings)
+ && Objects.equals(privateAccessSettingsId, that.privateAccessSettingsId);
}
@Override
public int hashCode() {
- return Objects.hash(
- allowedVpcEndpointIds,
- privateAccessLevel,
- privateAccessSettingsId,
- privateAccessSettingsName,
- publicAccessEnabled,
- region);
+ return Objects.hash(customerFacingPrivateAccessSettings, privateAccessSettingsId);
}
@Override
public String toString() {
return new ToStringer(ReplacePrivateAccessSettingsRequest.class)
- .add("allowedVpcEndpointIds", allowedVpcEndpointIds)
- .add("privateAccessLevel", privateAccessLevel)
+ .add("customerFacingPrivateAccessSettings", customerFacingPrivateAccessSettings)
.add("privateAccessSettingsId", privateAccessSettingsId)
- .add("privateAccessSettingsName", privateAccessSettingsName)
- .add("publicAccessEnabled", publicAccessEnabled)
- .add("region", region)
.toString();
}
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/RootBucketInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/RootBucketInfo.java
index 4faf62f43..410637961 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/RootBucketInfo.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/RootBucketInfo.java
@@ -7,10 +7,9 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
-/** Root S3 bucket information. */
@Generated
public class RootBucketInfo {
- /** The name of the S3 bucket. */
+ /** Name of the S3 bucket */
@JsonProperty("bucket_name")
private String bucketName;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/StorageAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/StorageAPI.java
index 5dc37db4d..b8a3cb710 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/StorageAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/StorageAPI.java
@@ -29,32 +29,21 @@ public StorageAPI(StorageService mock) {
impl = mock;
}
- /**
- * Creates new storage configuration for an account, specified by ID. Uploads a storage
- * configuration object that represents the root AWS S3 bucket in your account. Databricks stores
- * related workspace assets including DBFS, cluster logs, and job results. For the AWS S3 bucket,
- * you need to configure the required bucket policy.
- *
- * <p>For information about how to create a new workspace with this API, see [Create a new
- * workspace using the Account API]
- *
- * <p>[Create a new workspace using the Account API]:
- * http://docs.databricks.com/administration-guide/account-api/new-workspace.html
- */
+ /** Creates a Databricks storage configuration for an account. */
public StorageConfiguration create(CreateStorageConfigurationRequest request) {
return impl.create(request);
}
- public void delete(String storageConfigurationId) {
- delete(new DeleteStorageRequest().setStorageConfigurationId(storageConfigurationId));
+ public StorageConfiguration delete(String storageConfigurationId) {
+ return delete(new DeleteStorageRequest().setStorageConfigurationId(storageConfigurationId));
}
/**
* Deletes a Databricks storage configuration. You cannot delete a storage configuration that is
* associated with any workspace.
*/
- public void delete(DeleteStorageRequest request) {
- impl.delete(request);
+ public StorageConfiguration delete(DeleteStorageRequest request) {
+ return impl.delete(request);
}
public StorageConfiguration get(String storageConfigurationId) {
@@ -66,7 +55,7 @@ public StorageConfiguration get(GetStorageRequest request) {
return impl.get(request);
}
- /** Gets a list of all Databricks storage configurations for your account, specified by ID. */
+ /** Lists Databricks storage configurations for an account, specified by ID. */
public Iterable<StorageConfiguration> list() {
return impl.list();
}
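
A short sketch of the new delete signature, assuming the usual AccountClient wiring; the configuration ID is a placeholder:

    import com.databricks.sdk.AccountClient;
    import com.databricks.sdk.service.provisioning.StorageConfiguration;

    public class DeleteStorageSketch {
      public static void main(String[] args) {
        AccountClient a = new AccountClient();
        // The convenience overload takes the ID; both overloads now return the
        // deleted configuration rather than void.
        StorageConfiguration deleted = a.storage().delete("storage-conf-placeholder-id");
        System.out.println("deleted: " + deleted.getStorageConfigurationName());
      }
    }
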
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/StorageConfiguration.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/StorageConfiguration.java
index a20cf9f3b..e8af04fa9 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/StorageConfiguration.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/StorageConfiguration.java
@@ -9,7 +9,7 @@
@Generated
public class StorageConfiguration {
- /** The Databricks account ID that hosts the credential. */
+ /** The Databricks account ID associated with this storage configuration. */
@JsonProperty("account_id")
private String accountId;
@@ -17,7 +17,17 @@ public class StorageConfiguration {
@JsonProperty("creation_time")
private Long creationTime;
- /** */
+ /**
+ * Optional IAM role that is used to access the workspace catalog which is created during
+ * workspace creation for UC by Default. If a storage configuration with this field populated is
+ * used to create a workspace, then a workspace catalog is created together with the workspace.
+ * The workspace catalog shares the root bucket with internal workspace storage (including DBFS
+ * root) but uses a dedicated bucket path prefix.
+ */
+ @JsonProperty("role_arn")
+ private String roleArn;
+
+ /** The root bucket information for the storage configuration. */
@JsonProperty("root_bucket_info")
private RootBucketInfo rootBucketInfo;
@@ -47,6 +57,15 @@ public Long getCreationTime() {
return creationTime;
}
+ public StorageConfiguration setRoleArn(String roleArn) {
+ this.roleArn = roleArn;
+ return this;
+ }
+
+ public String getRoleArn() {
+ return roleArn;
+ }
+
public StorageConfiguration setRootBucketInfo(RootBucketInfo rootBucketInfo) {
this.rootBucketInfo = rootBucketInfo;
return this;
@@ -81,6 +100,7 @@ public boolean equals(Object o) {
StorageConfiguration that = (StorageConfiguration) o;
return Objects.equals(accountId, that.accountId)
&& Objects.equals(creationTime, that.creationTime)
+ && Objects.equals(roleArn, that.roleArn)
&& Objects.equals(rootBucketInfo, that.rootBucketInfo)
&& Objects.equals(storageConfigurationId, that.storageConfigurationId)
&& Objects.equals(storageConfigurationName, that.storageConfigurationName);
@@ -89,7 +109,12 @@ public boolean equals(Object o) {
@Override
public int hashCode() {
return Objects.hash(
- accountId, creationTime, rootBucketInfo, storageConfigurationId, storageConfigurationName);
+ accountId,
+ creationTime,
+ roleArn,
+ rootBucketInfo,
+ storageConfigurationId,
+ storageConfigurationName);
}
@Override
@@ -97,6 +122,7 @@ public String toString() {
return new ToStringer(StorageConfiguration.class)
.add("accountId", accountId)
.add("creationTime", creationTime)
+ .add("roleArn", roleArn)
.add("rootBucketInfo", rootBucketInfo)
.add("storageConfigurationId", storageConfigurationId)
.add("storageConfigurationName", storageConfigurationName)
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/StorageImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/StorageImpl.java
index 6be79f6d7..e81d1652d 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/StorageImpl.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/StorageImpl.java
@@ -34,7 +34,7 @@ public StorageConfiguration create(CreateStorageConfigurationRequest request) {
}
@Override
- public void delete(DeleteStorageRequest request) {
+ public StorageConfiguration delete(DeleteStorageRequest request) {
String path =
String.format(
"/api/2.0/accounts/%s/storage-configurations/%s",
@@ -43,7 +43,7 @@ public void delete(DeleteStorageRequest request) {
Request req = new Request("DELETE", path);
ApiClient.setQuery(req, request);
req.withHeader("Accept", "application/json");
- apiClient.execute(req, Void.class);
+ return apiClient.execute(req, StorageConfiguration.class);
} catch (IOException e) {
throw new DatabricksException("IO error: " + e.getMessage(), e);
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/StorageService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/StorageService.java
index 8324e8267..ba398e5aa 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/StorageService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/StorageService.java
@@ -17,29 +17,18 @@
*/
@Generated
public interface StorageService {
- /**
- * Creates new storage configuration for an account, specified by ID. Uploads a storage
- * configuration object that represents the root AWS S3 bucket in your account. Databricks stores
- * related workspace assets including DBFS, cluster logs, and job results. For the AWS S3 bucket,
- * you need to configure the required bucket policy.
- *
- * <p>For information about how to create a new workspace with this API, see [Create a new
- * workspace using the Account API]
- *
- * <p>[Create a new workspace using the Account API]:
- * http://docs.databricks.com/administration-guide/account-api/new-workspace.html
- */
+ /** Creates a Databricks storage configuration for an account. */
StorageConfiguration create(CreateStorageConfigurationRequest createStorageConfigurationRequest);
/**
* Deletes a Databricks storage configuration. You cannot delete a storage configuration that is
* associated with any workspace.
*/
- void delete(DeleteStorageRequest deleteStorageRequest);
+ StorageConfiguration delete(DeleteStorageRequest deleteStorageRequest);
/** Gets a Databricks storage configuration for an account, both specified by ID. */
StorageConfiguration get(GetStorageRequest getStorageRequest);
- /** Gets a list of all Databricks storage configurations for your account, specified by ID. */
+ /** Lists Databricks storage configurations for an account, specified by ID. */
Collection<StorageConfiguration> list();
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/StsRole.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/StsRole.java
index ee42e7691..39c3d6d28 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/StsRole.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/StsRole.java
@@ -9,26 +9,10 @@
@Generated
public class StsRole {
- /**
- * The external ID that needs to be trusted by the cross-account role. This is always your
- * Databricks account ID.
- */
- @JsonProperty("external_id")
- private String externalId;
-
- /** The Amazon Resource Name (ARN) of the cross account role. */
+ /** The Amazon Resource Name (ARN) of the cross account IAM role. */
@JsonProperty("role_arn")
private String roleArn;
- public StsRole setExternalId(String externalId) {
- this.externalId = externalId;
- return this;
- }
-
- public String getExternalId() {
- return externalId;
- }
-
public StsRole setRoleArn(String roleArn) {
this.roleArn = roleArn;
return this;
@@ -43,19 +27,16 @@ public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
StsRole that = (StsRole) o;
- return Objects.equals(externalId, that.externalId) && Objects.equals(roleArn, that.roleArn);
+ return Objects.equals(roleArn, that.roleArn);
}
@Override
public int hashCode() {
- return Objects.hash(externalId, roleArn);
+ return Objects.hash(roleArn);
}
@Override
public String toString() {
- return new ToStringer(StsRole.class)
- .add("externalId", externalId)
- .add("roleArn", roleArn)
- .toString();
+ return new ToStringer(StsRole.class).add("roleArn", roleArn).toString();
}
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/UpdateWorkspaceRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/UpdateWorkspaceRequest.java
index a690adac9..242a2a8ba 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/UpdateWorkspaceRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/UpdateWorkspaceRequest.java
@@ -3,159 +3,52 @@
package com.databricks.sdk.service.provisioning;
import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.QueryParam;
import com.databricks.sdk.support.ToStringer;
import com.fasterxml.jackson.annotation.JsonIgnore;
import com.fasterxml.jackson.annotation.JsonProperty;
-import java.util.Map;
import java.util.Objects;
@Generated
public class UpdateWorkspaceRequest {
- /**
- * The AWS region of the workspace's data plane (for example, `us-west-2`). This parameter is
- * available only for updating failed workspaces.
- */
- @JsonProperty("aws_region")
- private String awsRegion;
-
- /**
- * ID of the workspace's credential configuration object. This parameter is available for updating
- * both failed and running workspaces.
- */
- @JsonProperty("credentials_id")
- private String credentialsId;
-
- /**
- * The custom tags key-value pairing that is attached to this workspace. The key-value pair is a
- * string of utf-8 characters. The value can be an empty string, with maximum length of 255
- * characters. The key can be of maximum length of 127 characters, and cannot be empty.
- */
- @JsonProperty("custom_tags")
- private Map<String, String> customTags;
-
- /**
- * The ID of the workspace's managed services encryption key configuration object. This parameter
- * is available only for updating failed workspaces.
- */
- @JsonProperty("managed_services_customer_managed_key_id")
- private String managedServicesCustomerManagedKeyId;
-
/** */
- @JsonProperty("network_connectivity_config_id")
- private String networkConnectivityConfigId;
-
- /**
- * The ID of the workspace's network configuration object. Used only if you already use a
- * customer-managed VPC. For failed workspaces only, you can switch from a Databricks-managed VPC
- * to a customer-managed VPC by updating the workspace to add a network configuration ID.
- */
- @JsonProperty("network_id")
- private String networkId;
-
- /**
- * The ID of the workspace's private access settings configuration object. This parameter is
- * available only for updating failed workspaces.
- */
- @JsonProperty("private_access_settings_id")
- private String privateAccessSettingsId;
-
- /**
- * The ID of the workspace's storage configuration object. This parameter is available only for
- * updating failed workspaces.
- */
- @JsonProperty("storage_configuration_id")
- private String storageConfigurationId;
+ @JsonProperty("customer_facing_workspace")
+ private Workspace customerFacingWorkspace;
/**
- * The ID of the key configuration object for workspace storage. This parameter is available for
- * updating both failed and running workspaces.
+ * The field mask must be a single string, with multiple fields separated by commas (no spaces).
+ * The field path is relative to the resource object, using a dot (`.`) to navigate sub-fields
+ * (e.g., `author.given_name`). Specification of elements in sequence or map fields is not
+ * allowed, as only the entire collection field can be specified. Field names must exactly match
+ * the resource field names.
+ *
+ * A field mask of `*` indicates full replacement. It’s recommended to always explicitly list
+ * the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if
+ * the API changes in the future.
*/
- @JsonProperty("storage_customer_managed_key_id")
- private String storageCustomerManagedKeyId;
+ @JsonIgnore
+ @QueryParam("update_mask")
+ private String updateMask;
- /** Workspace ID. */
+ /** A unique integer ID for the workspace */
@JsonIgnore private Long workspaceId;
- public UpdateWorkspaceRequest setAwsRegion(String awsRegion) {
- this.awsRegion = awsRegion;
- return this;
- }
-
- public String getAwsRegion() {
- return awsRegion;
- }
-
- public UpdateWorkspaceRequest setCredentialsId(String credentialsId) {
- this.credentialsId = credentialsId;
- return this;
- }
-
- public String getCredentialsId() {
- return credentialsId;
- }
-
- public UpdateWorkspaceRequest setCustomTags(Map<String, String> customTags) {
- this.customTags = customTags;
- return this;
- }
-
- public Map<String, String> getCustomTags() {
- return customTags;
- }
-
- public UpdateWorkspaceRequest setManagedServicesCustomerManagedKeyId(
- String managedServicesCustomerManagedKeyId) {
- this.managedServicesCustomerManagedKeyId = managedServicesCustomerManagedKeyId;
- return this;
- }
-
- public String getManagedServicesCustomerManagedKeyId() {
- return managedServicesCustomerManagedKeyId;
- }
-
- public UpdateWorkspaceRequest setNetworkConnectivityConfigId(String networkConnectivityConfigId) {
- this.networkConnectivityConfigId = networkConnectivityConfigId;
- return this;
- }
-
- public String getNetworkConnectivityConfigId() {
- return networkConnectivityConfigId;
- }
-
- public UpdateWorkspaceRequest setNetworkId(String networkId) {
- this.networkId = networkId;
- return this;
- }
-
- public String getNetworkId() {
- return networkId;
- }
-
- public UpdateWorkspaceRequest setPrivateAccessSettingsId(String privateAccessSettingsId) {
- this.privateAccessSettingsId = privateAccessSettingsId;
- return this;
- }
-
- public String getPrivateAccessSettingsId() {
- return privateAccessSettingsId;
- }
-
- public UpdateWorkspaceRequest setStorageConfigurationId(String storageConfigurationId) {
- this.storageConfigurationId = storageConfigurationId;
+ public UpdateWorkspaceRequest setCustomerFacingWorkspace(Workspace customerFacingWorkspace) {
+ this.customerFacingWorkspace = customerFacingWorkspace;
return this;
}
- public String getStorageConfigurationId() {
- return storageConfigurationId;
+ public Workspace getCustomerFacingWorkspace() {
+ return customerFacingWorkspace;
}
- public UpdateWorkspaceRequest setStorageCustomerManagedKeyId(String storageCustomerManagedKeyId) {
- this.storageCustomerManagedKeyId = storageCustomerManagedKeyId;
+ public UpdateWorkspaceRequest setUpdateMask(String updateMask) {
+ this.updateMask = updateMask;
return this;
}
- public String getStorageCustomerManagedKeyId() {
- return storageCustomerManagedKeyId;
+ public String getUpdateMask() {
+ return updateMask;
}
public UpdateWorkspaceRequest setWorkspaceId(Long workspaceId) {
@@ -172,46 +65,21 @@ public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
UpdateWorkspaceRequest that = (UpdateWorkspaceRequest) o;
- return Objects.equals(awsRegion, that.awsRegion)
- && Objects.equals(credentialsId, that.credentialsId)
- && Objects.equals(customTags, that.customTags)
- && Objects.equals(
- managedServicesCustomerManagedKeyId, that.managedServicesCustomerManagedKeyId)
- && Objects.equals(networkConnectivityConfigId, that.networkConnectivityConfigId)
- && Objects.equals(networkId, that.networkId)
- && Objects.equals(privateAccessSettingsId, that.privateAccessSettingsId)
- && Objects.equals(storageConfigurationId, that.storageConfigurationId)
- && Objects.equals(storageCustomerManagedKeyId, that.storageCustomerManagedKeyId)
+ return Objects.equals(customerFacingWorkspace, that.customerFacingWorkspace)
+ && Objects.equals(updateMask, that.updateMask)
&& Objects.equals(workspaceId, that.workspaceId);
}
@Override
public int hashCode() {
- return Objects.hash(
- awsRegion,
- credentialsId,
- customTags,
- managedServicesCustomerManagedKeyId,
- networkConnectivityConfigId,
- networkId,
- privateAccessSettingsId,
- storageConfigurationId,
- storageCustomerManagedKeyId,
- workspaceId);
+ return Objects.hash(customerFacingWorkspace, updateMask, workspaceId);
}
@Override
public String toString() {
return new ToStringer(UpdateWorkspaceRequest.class)
- .add("awsRegion", awsRegion)
- .add("credentialsId", credentialsId)
- .add("customTags", customTags)
- .add("managedServicesCustomerManagedKeyId", managedServicesCustomerManagedKeyId)
- .add("networkConnectivityConfigId", networkConnectivityConfigId)
- .add("networkId", networkId)
- .add("privateAccessSettingsId", privateAccessSettingsId)
- .add("storageConfigurationId", storageConfigurationId)
- .add("storageCustomerManagedKeyId", storageCustomerManagedKeyId)
+ .add("customerFacingWorkspace", customerFacingWorkspace)
+ .add("updateMask", updateMask)
.add("workspaceId", workspaceId)
.toString();
}
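
A minimal sketch of the new masked-update shape, assuming Workspace exposes the generated setCustomTags builder; the workspace ID and tag values are placeholders, and the mask deliberately names the one field being changed:

    import com.databricks.sdk.service.provisioning.UpdateWorkspaceRequest;
    import com.databricks.sdk.service.provisioning.Workspace;
    import java.util.Collections;

    public class UpdateMaskSketch {
      public static void main(String[] args) {
        // Only fields named in update_mask are read from the embedded workspace;
        // everything else on the object is ignored by the server.
        UpdateWorkspaceRequest req =
            new UpdateWorkspaceRequest()
                .setWorkspaceId(1234567890L)
                .setCustomerFacingWorkspace(
                    new Workspace().setCustomTags(Collections.singletonMap("cost-center", "eng")))
                .setUpdateMask("custom_tags"); // explicit field list; avoid "*"
        System.out.println(req);
      }
    }
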
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/VpcEndpoint.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/VpcEndpoint.java
index 82fe9b177..ea8b27b53 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/VpcEndpoint.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/VpcEndpoint.java
@@ -7,9 +7,13 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
+/** * */
@Generated
public class VpcEndpoint {
- /** The Databricks account ID that hosts the VPC endpoint configuration. */
+ /**
+ * The Databricks account ID that hosts the VPC endpoint configuration. TODO - This may signal an
+ * OpenAPI diff; it does not show up in the generated spec
+ */
@JsonProperty("account_id")
private String accountId;
@@ -33,7 +37,7 @@ public class VpcEndpoint {
@JsonProperty("aws_vpc_endpoint_id")
private String awsVpcEndpointId;
- /** */
+ /** The cloud info of this vpc endpoint. Info for a GCP vpc endpoint. */
@JsonProperty("gcp_vpc_endpoint_info")
private GcpVpcEndpointInfo gcpVpcEndpointInfo;
@@ -51,7 +55,13 @@ public class VpcEndpoint {
@JsonProperty("state")
private String state;
- /** */
+ /**
+ * This enumeration represents the type of Databricks VPC endpoint service that was used when
+ * creating this VPC endpoint. If the VPC endpoint connects to the Databricks control plane for
+ * either the front-end connection or the back-end REST API connection, the value is
+ * WORKSPACE_ACCESS. If the VPC endpoint connects to the Databricks workspace for the back-end
+ * secure cluster connectivity relay, the value is DATAPLANE_RELAY_ACCESS.
+ */
@JsonProperty("use_case")
private EndpointUseCase useCase;
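
A small sketch of filtering endpoint registrations by use_case, assuming the enum constants named in the comment above (WORKSPACE_ACCESS, DATAPLANE_RELAY_ACCESS) and the usual AccountClient list call:

    import com.databricks.sdk.AccountClient;
    import com.databricks.sdk.service.provisioning.EndpointUseCase;
    import com.databricks.sdk.service.provisioning.VpcEndpoint;

    public class UseCaseFilterSketch {
      public static void main(String[] args) {
        AccountClient a = new AccountClient();
        // Separate front-end/REST API endpoints from back-end relay endpoints.
        for (VpcEndpoint e : a.vpcEndpoints().list()) {
          if (e.getUseCase() == EndpointUseCase.WORKSPACE_ACCESS) {
            System.out.println("workspace access: " + e.getAwsVpcEndpointId());
          }
        }
      }
    }
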
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/VpcEndpointsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/VpcEndpointsAPI.java
index 8c57e5857..e9568104b 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/VpcEndpointsAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/VpcEndpointsAPI.java
@@ -43,23 +43,16 @@ public VpcEndpoint create(CreateVpcEndpointRequest request) {
return impl.create(request);
}
- public void delete(String vpcEndpointId) {
- delete(new DeleteVpcEndpointRequest().setVpcEndpointId(vpcEndpointId));
+ public VpcEndpoint delete(String vpcEndpointId) {
+ return delete(new DeleteVpcEndpointRequest().setVpcEndpointId(vpcEndpointId));
}
/**
- * Deletes a VPC endpoint configuration, which represents an [AWS VPC endpoint] that can
- * communicate privately with Databricks over [AWS PrivateLink].
- *
- * <p>Before configuring PrivateLink, read the [Databricks article about PrivateLink].
- *
- * <p>[AWS PrivateLink]: https://aws.amazon.com/privatelink [AWS VPC endpoint]:
- * https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html [Databricks article about
- * PrivateLink]:
- * https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
+ * Deletes a Databricks VPC endpoint configuration. You cannot delete a VPC endpoint configuration
+ * that is associated with any workspace.
*/
- public void delete(DeleteVpcEndpointRequest request) {
- impl.delete(request);
+ public VpcEndpoint delete(DeleteVpcEndpointRequest request) {
+ return impl.delete(request);
}
public VpcEndpoint get(String vpcEndpointId) {
@@ -77,14 +70,7 @@ public VpcEndpoint get(GetVpcEndpointRequest request) {
return impl.get(request);
}
- /**
- * Gets a list of all VPC endpoints for an account, specified by ID.
- *
- * <p>Before configuring PrivateLink, read the [Databricks article about PrivateLink].
- *
- * <p>[Databricks article about PrivateLink]:
- * https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
- */
+ /** Lists Databricks VPC endpoint configurations for an account. */
public Iterable<VpcEndpoint> list() {
return impl.list();
}
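
And the matching delete sketch, again with a placeholder ID; as with storage configurations, the removed object is now echoed back:

    import com.databricks.sdk.AccountClient;
    import com.databricks.sdk.service.provisioning.VpcEndpoint;

    public class DeleteVpcEndpointSketch {
      public static void main(String[] args) {
        AccountClient a = new AccountClient();
        // delete(...) now returns the deleted registration instead of void.
        VpcEndpoint removed = a.vpcEndpoints().delete("databricks-vpce-placeholder-id");
        System.out.println("removed, last state: " + removed.getState());
      }
    }
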
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/VpcEndpointsImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/VpcEndpointsImpl.java
index c6141bb98..b11d9f759 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/VpcEndpointsImpl.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/VpcEndpointsImpl.java
@@ -33,7 +33,7 @@ public VpcEndpoint create(CreateVpcEndpointRequest request) {
}
@Override
- public void delete(DeleteVpcEndpointRequest request) {
+ public VpcEndpoint delete(DeleteVpcEndpointRequest request) {
String path =
String.format(
"/api/2.0/accounts/%s/vpc-endpoints/%s",
@@ -42,7 +42,7 @@ public void delete(DeleteVpcEndpointRequest request) {
Request req = new Request("DELETE", path);
ApiClient.setQuery(req, request);
req.withHeader("Accept", "application/json");
- apiClient.execute(req, Void.class);
+ return apiClient.execute(req, VpcEndpoint.class);
} catch (IOException e) {
throw new DatabricksException("IO error: " + e.getMessage(), e);
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/VpcEndpointsService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/VpcEndpointsService.java
index 0f4a57c16..8814e90ac 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/VpcEndpointsService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/VpcEndpointsService.java
@@ -32,17 +32,10 @@ public interface VpcEndpointsService {
VpcEndpoint create(CreateVpcEndpointRequest createVpcEndpointRequest);
/**
- * Deletes a VPC endpoint configuration, which represents an [AWS VPC endpoint] that can
- * communicate privately with Databricks over [AWS PrivateLink].
- *
- * <p>Before configuring PrivateLink, read the [Databricks article about PrivateLink].
- *
- * <p>[AWS PrivateLink]: https://aws.amazon.com/privatelink [AWS VPC endpoint]:
- * https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html [Databricks article about
- * PrivateLink]:
- * https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
+ * Deletes a Databricks VPC endpoint configuration. You cannot delete a VPC endpoint configuration
+ * that is associated with any workspace.
*/
- void delete(DeleteVpcEndpointRequest deleteVpcEndpointRequest);
+ VpcEndpoint delete(DeleteVpcEndpointRequest deleteVpcEndpointRequest);
/**
* Gets a VPC endpoint configuration, which represents a [VPC endpoint] object in AWS used to
@@ -53,13 +46,6 @@ public interface VpcEndpointsService {
*/
VpcEndpoint get(GetVpcEndpointRequest getVpcEndpointRequest);
- /**
- * Gets a list of all VPC endpoints for an account, specified by ID.
- *
- * <p>Before configuring PrivateLink, read the [Databricks article about PrivateLink].
- *
- * <p>[Databricks article about PrivateLink]:
- * https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html
- */
+ /** Lists Databricks VPC endpoint configurations for an account. */
Collection<VpcEndpoint> list();
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/VpcStatus.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/VpcStatus.java
index d6f34acaa..eae712fe4 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/VpcStatus.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/VpcStatus.java
@@ -4,14 +4,10 @@
import com.databricks.sdk.support.Generated;
-/**
- * The status of this network configuration object in terms of its use in a workspace: *
- * `UNATTACHED`: Unattached. * `VALID`: Valid. * `BROKEN`: Broken. * `WARNED`: Warned.
- */
@Generated
public enum VpcStatus {
- BROKEN, // Broken.
- UNATTACHED, // Unattached.
- VALID, // Valid.
- WARNED, // Warned.
+ BROKEN,
+ UNATTACHED,
+ VALID,
+ WARNED,
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WarningType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WarningType.java
index 184b8f62c..69379d15f 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WarningType.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WarningType.java
@@ -5,7 +5,6 @@
import com.databricks.sdk.support.Generated;
import com.fasterxml.jackson.annotation.JsonProperty;
-/** The AWS resource associated with this warning: a subnet or a security group. */
@Generated
public enum WarningType {
@JsonProperty("securityGroup")
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/Workspace.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/Workspace.java
index ea983a894..d3b48e889 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/Workspace.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/Workspace.java
@@ -14,7 +14,7 @@ public class Workspace {
@JsonProperty("account_id")
private String accountId;
- /** The AWS region of the workspace data plane (for example, `us-west-2`). */
+ /** */
@JsonProperty("aws_region")
private String awsRegion;
@@ -30,6 +30,10 @@ public class Workspace {
@JsonProperty("cloud_resource_container")
private CloudResourceContainer cloudResourceContainer;
+ /** The compute mode of the workspace. */
+ @JsonProperty("compute_mode")
+ private CustomerFacingComputeMode computeMode;
+
/** Time in epoch milliseconds when the workspace was created. */
@JsonProperty("creation_time")
private Long creationTime;
@@ -46,22 +50,10 @@ public class Workspace {
@JsonProperty("custom_tags")
private Map<String, String> customTags;
- /**
- * The deployment name defines part of the subdomain for the workspace. The workspace URL for web
- * application and REST APIs is `.cloud.databricks.com`.
- *
- * This value must be unique across all non-deleted deployments across all AWS regions.
- */
+ /** */
@JsonProperty("deployment_name")
private String deploymentName;
- /**
- * If this workspace is for a external customer, then external_customer_info is populated. If this
- * workspace is not for a external customer, then external_customer_info is empty.
- */
- @JsonProperty("external_customer_info")
- private ExternalCustomerInfo externalCustomerInfo;
-
/** */
@JsonProperty("gcp_managed_network_config")
private GcpManagedNetworkConfig gcpManagedNetworkConfig;
@@ -70,10 +62,6 @@ public class Workspace {
@JsonProperty("gke_config")
private GkeConfig gkeConfig;
- /** Whether no public IP is enabled for the workspace. */
- @JsonProperty("is_no_public_ip_enabled")
- private Boolean isNoPublicIpEnabled;
-
/**
* The Google Cloud region of the workspace data plane in your Google account (for example,
* `us-east4`).
@@ -86,8 +74,20 @@ public class Workspace {
private String managedServicesCustomerManagedKeyId;
/**
- * The network configuration ID that is attached to the workspace. This field is available only if
- * the network is a customer-managed network.
+ * The network configuration for the workspace.
+ *
+ * <p>DEPRECATED. Use `network_id` instead.
+ */
+ @JsonProperty("network")
+ private WorkspaceNetwork network;
+
+ /** The object ID of network connectivity config. */
+ @JsonProperty("network_connectivity_config_id")
+ private String networkConnectivityConfigId;
+
+ /**
+ * If this workspace is BYO VPC, then the network_id will be populated. If this workspace is not
+ * BYO VPC, then the network_id will be empty.
*/
@JsonProperty("network_id")
private String networkId;
@@ -118,6 +118,10 @@ public class Workspace {
@JsonProperty("storage_customer_managed_key_id")
private String storageCustomerManagedKeyId;
+ /** The storage mode of the workspace. */
+ @JsonProperty("storage_mode")
+ private CustomerFacingStorageMode storageMode;
+
/** A unique integer ID for the workspace */
@JsonProperty("workspace_id")
private Long workspaceId;
@@ -126,7 +130,7 @@ public class Workspace {
@JsonProperty("workspace_name")
private String workspaceName;
- /** */
+ /** The status of a workspace */
@JsonProperty("workspace_status")
private WorkspaceStatus workspaceStatus;
@@ -179,6 +183,15 @@ public CloudResourceContainer getCloudResourceContainer() {
return cloudResourceContainer;
}
+ public Workspace setComputeMode(CustomerFacingComputeMode computeMode) {
+ this.computeMode = computeMode;
+ return this;
+ }
+
+ public CustomerFacingComputeMode getComputeMode() {
+ return computeMode;
+ }
+
public Workspace setCreationTime(Long creationTime) {
this.creationTime = creationTime;
return this;
@@ -215,15 +228,6 @@ public String getDeploymentName() {
return deploymentName;
}
- public Workspace setExternalCustomerInfo(ExternalCustomerInfo externalCustomerInfo) {
- this.externalCustomerInfo = externalCustomerInfo;
- return this;
- }
-
- public ExternalCustomerInfo getExternalCustomerInfo() {
- return externalCustomerInfo;
- }
-
public Workspace setGcpManagedNetworkConfig(GcpManagedNetworkConfig gcpManagedNetworkConfig) {
this.gcpManagedNetworkConfig = gcpManagedNetworkConfig;
return this;
@@ -242,15 +246,6 @@ public GkeConfig getGkeConfig() {
return gkeConfig;
}
- public Workspace setIsNoPublicIpEnabled(Boolean isNoPublicIpEnabled) {
- this.isNoPublicIpEnabled = isNoPublicIpEnabled;
- return this;
- }
-
- public Boolean getIsNoPublicIpEnabled() {
- return isNoPublicIpEnabled;
- }
-
public Workspace setLocation(String location) {
this.location = location;
return this;
@@ -270,6 +265,24 @@ public String getManagedServicesCustomerManagedKeyId() {
return managedServicesCustomerManagedKeyId;
}
+ public Workspace setNetwork(WorkspaceNetwork network) {
+ this.network = network;
+ return this;
+ }
+
+ public WorkspaceNetwork getNetwork() {
+ return network;
+ }
+
+ public Workspace setNetworkConnectivityConfigId(String networkConnectivityConfigId) {
+ this.networkConnectivityConfigId = networkConnectivityConfigId;
+ return this;
+ }
+
+ public String getNetworkConnectivityConfigId() {
+ return networkConnectivityConfigId;
+ }
+
public Workspace setNetworkId(String networkId) {
this.networkId = networkId;
return this;
@@ -315,6 +328,15 @@ public String getStorageCustomerManagedKeyId() {
return storageCustomerManagedKeyId;
}
+ public Workspace setStorageMode(CustomerFacingStorageMode storageMode) {
+ this.storageMode = storageMode;
+ return this;
+ }
+
+ public CustomerFacingStorageMode getStorageMode() {
+ return storageMode;
+ }
+
public Workspace setWorkspaceId(Long workspaceId) {
this.workspaceId = workspaceId;
return this;
@@ -361,22 +383,24 @@ public boolean equals(Object o) {
&& Objects.equals(azureWorkspaceInfo, that.azureWorkspaceInfo)
&& Objects.equals(cloud, that.cloud)
&& Objects.equals(cloudResourceContainer, that.cloudResourceContainer)
+ && Objects.equals(computeMode, that.computeMode)
&& Objects.equals(creationTime, that.creationTime)
&& Objects.equals(credentialsId, that.credentialsId)
&& Objects.equals(customTags, that.customTags)
&& Objects.equals(deploymentName, that.deploymentName)
- && Objects.equals(externalCustomerInfo, that.externalCustomerInfo)
&& Objects.equals(gcpManagedNetworkConfig, that.gcpManagedNetworkConfig)
&& Objects.equals(gkeConfig, that.gkeConfig)
- && Objects.equals(isNoPublicIpEnabled, that.isNoPublicIpEnabled)
&& Objects.equals(location, that.location)
&& Objects.equals(
managedServicesCustomerManagedKeyId, that.managedServicesCustomerManagedKeyId)
+ && Objects.equals(network, that.network)
+ && Objects.equals(networkConnectivityConfigId, that.networkConnectivityConfigId)
&& Objects.equals(networkId, that.networkId)
&& Objects.equals(pricingTier, that.pricingTier)
&& Objects.equals(privateAccessSettingsId, that.privateAccessSettingsId)
&& Objects.equals(storageConfigurationId, that.storageConfigurationId)
&& Objects.equals(storageCustomerManagedKeyId, that.storageCustomerManagedKeyId)
+ && Objects.equals(storageMode, that.storageMode)
&& Objects.equals(workspaceId, that.workspaceId)
&& Objects.equals(workspaceName, that.workspaceName)
&& Objects.equals(workspaceStatus, that.workspaceStatus)
@@ -391,21 +415,23 @@ public int hashCode() {
azureWorkspaceInfo,
cloud,
cloudResourceContainer,
+ computeMode,
creationTime,
credentialsId,
customTags,
deploymentName,
- externalCustomerInfo,
gcpManagedNetworkConfig,
gkeConfig,
- isNoPublicIpEnabled,
location,
managedServicesCustomerManagedKeyId,
+ network,
+ networkConnectivityConfigId,
networkId,
pricingTier,
privateAccessSettingsId,
storageConfigurationId,
storageCustomerManagedKeyId,
+ storageMode,
workspaceId,
workspaceName,
workspaceStatus,
@@ -420,21 +446,23 @@ public String toString() {
.add("azureWorkspaceInfo", azureWorkspaceInfo)
.add("cloud", cloud)
.add("cloudResourceContainer", cloudResourceContainer)
+ .add("computeMode", computeMode)
.add("creationTime", creationTime)
.add("credentialsId", credentialsId)
.add("customTags", customTags)
.add("deploymentName", deploymentName)
- .add("externalCustomerInfo", externalCustomerInfo)
.add("gcpManagedNetworkConfig", gcpManagedNetworkConfig)
.add("gkeConfig", gkeConfig)
- .add("isNoPublicIpEnabled", isNoPublicIpEnabled)
.add("location", location)
.add("managedServicesCustomerManagedKeyId", managedServicesCustomerManagedKeyId)
+ .add("network", network)
+ .add("networkConnectivityConfigId", networkConnectivityConfigId)
.add("networkId", networkId)
.add("pricingTier", pricingTier)
.add("privateAccessSettingsId", privateAccessSettingsId)
.add("storageConfigurationId", storageConfigurationId)
.add("storageCustomerManagedKeyId", storageCustomerManagedKeyId)
+ .add("storageMode", storageMode)
.add("workspaceId", workspaceId)
.add("workspaceName", workspaceName)
.add("workspaceStatus", workspaceStatus)
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspaceNetwork.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspaceNetwork.java
new file mode 100755
index 000000000..478a1fd93
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspaceNetwork.java
@@ -0,0 +1,89 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.provisioning;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+/** The network configuration for workspaces. */
+@Generated
+public class WorkspaceNetwork {
+ /**
+ * The shared network config for GCP workspace. This object has common network configurations that
+ * are network attributions of a workspace. This object is input-only.
+ */
+ @JsonProperty("gcp_common_network_config")
+ private GcpCommonNetworkConfig gcpCommonNetworkConfig;
+
+ /**
+ * The mutually exclusive network deployment modes. The option decides which network mode the
+ * workspace will use. The network config for GCP workspace with Databricks managed network. This
+ * object is input-only and will not be provided when listing workspaces. See
+ * go/gcp-byovpc-alpha-design for interface decisions.
+ */
+ @JsonProperty("gcp_managed_network_config")
+ private GcpManagedNetworkConfig gcpManagedNetworkConfig;
+
+ /**
+ * The ID of the network object, if the workspace is a BYOVPC workspace. This should apply to
+ * workspaces on all clouds in internal services. In accounts-rest-api, user will use
+ * workspace.network_id for input and output instead. Currently (2021-06-19) the network ID is
+ * only used by GCP.
+ */
+ @JsonProperty("network_id")
+ private String networkId;
+
+ public WorkspaceNetwork setGcpCommonNetworkConfig(GcpCommonNetworkConfig gcpCommonNetworkConfig) {
+ this.gcpCommonNetworkConfig = gcpCommonNetworkConfig;
+ return this;
+ }
+
+ public GcpCommonNetworkConfig getGcpCommonNetworkConfig() {
+ return gcpCommonNetworkConfig;
+ }
+
+ public WorkspaceNetwork setGcpManagedNetworkConfig(
+ GcpManagedNetworkConfig gcpManagedNetworkConfig) {
+ this.gcpManagedNetworkConfig = gcpManagedNetworkConfig;
+ return this;
+ }
+
+ public GcpManagedNetworkConfig getGcpManagedNetworkConfig() {
+ return gcpManagedNetworkConfig;
+ }
+
+ public WorkspaceNetwork setNetworkId(String networkId) {
+ this.networkId = networkId;
+ return this;
+ }
+
+ public String getNetworkId() {
+ return networkId;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ WorkspaceNetwork that = (WorkspaceNetwork) o;
+ return Objects.equals(gcpCommonNetworkConfig, that.gcpCommonNetworkConfig)
+ && Objects.equals(gcpManagedNetworkConfig, that.gcpManagedNetworkConfig)
+ && Objects.equals(networkId, that.networkId);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(gcpCommonNetworkConfig, gcpManagedNetworkConfig, networkId);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(WorkspaceNetwork.class)
+ .add("gcpCommonNetworkConfig", gcpCommonNetworkConfig)
+ .add("gcpManagedNetworkConfig", gcpManagedNetworkConfig)
+ .add("networkId", networkId)
+ .toString();
+ }
+}
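
A hedged sketch of attaching the new input-only network object to a workspace for the BYO-VPC case; the network ID is a placeholder:

    import com.databricks.sdk.service.provisioning.Workspace;
    import com.databricks.sdk.service.provisioning.WorkspaceNetwork;

    public class WorkspaceNetworkSketch {
      public static void main(String[] args) {
        // BYO-VPC mode: only network_id is set; the GCP managed/common configs
        // are the mutually exclusive Databricks-managed alternative.
        Workspace ws =
            new Workspace().setNetwork(new WorkspaceNetwork().setNetworkId("nw-placeholder-id"));
        System.out.println(ws.getNetwork().getNetworkId());
      }
    }
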
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspaceStatus.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspaceStatus.java
index c6614660a..628d49e35 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspaceStatus.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspaceStatus.java
@@ -5,8 +5,12 @@
import com.databricks.sdk.support.Generated;
/**
- * The status of the workspace. For workspace creation, usually it is set to `PROVISIONING`
- * initially. Continue to check the status until the status is `RUNNING`.
+ * The different statuses of a workspace. The following represents the current set of valid
+ * transitions from status to status:
+ *
+ * <p>NOT_PROVISIONED -> PROVISIONING -> CANCELLED
+ *
+ * <p>PROVISIONING -> RUNNING -> FAILED -> CANCELLED (note that this transition is disallowed in
+ * the MultiWorkspace Project)
+ *
+ * <p>RUNNING -> PROVISIONING -> BANNED -> CANCELLED
+ *
+ * <p>FAILED -> PROVISIONING -> CANCELLED
+ *
+ * <p>BANNED -> RUNNING -> CANCELLED
+ *
+ * <p>Note that a transition from any state to itself is also valid. TODO(PLAT-5867): add a
+ * transition from CANCELLED to some other value (e.g. RECOVERING)
*/
@Generated
public enum WorkspaceStatus {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspacesAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspacesAPI.java
index 879c61231..479e91661 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspacesAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspacesAPI.java
@@ -83,14 +83,38 @@ public Workspace waitGetWorkspaceRunning(
}
/**
- * Creates a new workspace.
+ * Creates a new workspace using a credential configuration and a storage configuration, an
+ * optional network configuration (if using a customer-managed VPC), an optional managed services
+ * key configuration (if using customer-managed keys for managed services), and an optional
+ * storage key configuration (if using customer-managed keys for storage). The key configurations
+ * used for managed services and storage encryption can be the same or different.
+ *
+ * <p>Important: This operation is asynchronous. A response with HTTP status code 200 means the
+ * request has been accepted and is in progress, but does not mean that the workspace deployed
+ * successfully and is running. The initial workspace status is typically PROVISIONING. Use the
+ * workspace ID (workspace_id) field in the response to identify the new workspace and make
+ * repeated GET requests with the workspace ID and check its status. The workspace becomes
+ * available when the status changes to RUNNING.
+ *
+ * <p>You can share one customer-managed VPC with multiple workspaces in a single account. It is
+ * not required to create a new VPC for each workspace. However, you cannot reuse subnets or
+ * Security Groups between workspaces. If you plan to share one VPC with multiple workspaces, make
+ * sure you size your VPC and subnets accordingly. Because a Databricks Account API network
+ * configuration encapsulates this information, you cannot reuse a Databricks Account API network
+ * configuration across workspaces.
+ *
+ * <p>For information about how to create a new workspace with this API including error handling,
+ * see [Create a new workspace using the Account API].
+ *
+ * <p>Important: Customer-managed VPCs, PrivateLink, and customer-managed keys are supported on a
+ * limited set of deployment and subscription types. If you have questions about availability,
+ * contact your Databricks representative.
*
- * <p>**Important**: This operation is asynchronous. A response with HTTP status code 200 means
- * the request has been accepted and is in progress, but does not mean that the workspace deployed
- * successfully and is running. The initial workspace status is typically `PROVISIONING`. Use the
- * workspace ID (`workspace_id`) field in the response to identify the new workspace and make
- * repeated `GET` requests with the workspace ID and check its status. The workspace becomes
- * available when the status changes to `RUNNING`.
+ *
+ * <p>This operation is available only if your account is on the E2 version of the platform or on
+ * a select custom plan that allows multiple workspaces per account.
+ *
+ * <p>[Create a new workspace using the Account API]:
+ * http://docs.databricks.com/administration-guide/account-api/new-workspace.html
*/
public Wait<Workspace, Workspace> create(CreateWorkspaceRequest request) {
Workspace response = impl.create(request);
@@ -100,20 +124,13 @@ public Wait create(CreateWorkspaceRequest request) {
response);
}
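
A minimal end-to-end sketch of the asynchronous create described above, assuming CreateWorkspaceRequest exposes the usual generated setters and that the returned Wait handle's get() blocks until the RUNNING status; all IDs and names are placeholders:

    import com.databricks.sdk.AccountClient;
    import com.databricks.sdk.service.provisioning.CreateWorkspaceRequest;
    import com.databricks.sdk.service.provisioning.Workspace;

    public class CreateWorkspaceSketch {
      public static void main(String[] args) throws Exception {
        AccountClient a = new AccountClient();
        // create(...) returns a Wait handle; get() polls the workspace by ID
        // until its status reaches RUNNING.
        Workspace running =
            a.workspaces()
                .create(
                    new CreateWorkspaceRequest()
                        .setWorkspaceName("my-workspace")
                        .setAwsRegion("us-west-2")
                        .setCredentialsId("credential-conf-placeholder")
                        .setStorageConfigurationId("storage-conf-placeholder"))
                .get();
        System.out.println(running.getWorkspaceStatus());
      }
    }
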
- public void delete(long workspaceId) {
- delete(new DeleteWorkspaceRequest().setWorkspaceId(workspaceId));
+ public Workspace delete(long workspaceId) {
+ return delete(new DeleteWorkspaceRequest().setWorkspaceId(workspaceId));
}
- /**
- * Terminates and deletes a Databricks workspace. From an API perspective, deletion is immediate.
- * However, it might take a few minutes for all workspaces resources to be deleted, depending on
- * the size and number of workspace resources.
- *
- * This operation is available only if your account is on the E2 version of the platform or on
- * a select custom plan that allows multiple workspaces per account.
- */
- public void delete(DeleteWorkspaceRequest request) {
- impl.delete(request);
+ /** Deletes a Databricks workspace, both specified by ID. */
+ public Workspace delete(DeleteWorkspaceRequest request) {
+ return impl.delete(request);
}
public Workspace get(long workspaceId) {
@@ -124,13 +141,9 @@ public Workspace get(long workspaceId) {
* Gets information including status for a Databricks workspace, specified by ID. In the response,
* the `workspace_status` field indicates the current status. After initial workspace creation
* (which is asynchronous), make repeated `GET` requests with the workspace ID and check its
- * status. The workspace becomes available when the status changes to `RUNNING`.
- *
- * <p>For information about how to create a new workspace with this API **including error
- * handling**, see [Create a new workspace using the Account API].
- *
- * <p>This operation is available only if your account is on the E2 version of the platform or on
- * a select custom plan that allows multiple workspaces per account.
+ * status. The workspace becomes available when the status changes to `RUNNING`. For information
+ * about how to create a new workspace with this API **including error handling**, see [Create a
+ * new workspace using the Account API].
*
* <p>[Create a new workspace using the Account API]:
* http://docs.databricks.com/administration-guide/account-api/new-workspace.html
@@ -139,124 +152,18 @@ public Workspace get(GetWorkspaceRequest request) {
return impl.get(request);
}
- /**
- * Gets a list of all workspaces associated with an account, specified by ID.
- *
- * <p>This operation is available only if your account is on the E2 version of the platform or on
- * a select custom plan that allows multiple workspaces per account.
- */
+ /** Lists Databricks workspaces for an account. */
public Iterable<Workspace> list() {
return impl.list();
}
- /**
- * Updates a workspace configuration for either a running workspace or a failed workspace. The
- * elements that can be updated varies between these two use cases.
- *
- * ### Update a failed workspace You can update a Databricks workspace configuration for failed
- * workspace deployment for some fields, but not all fields. For a failed workspace, this request
- * supports updates to the following fields only: - Credential configuration ID - Storage
- * configuration ID - Network configuration ID. Used only to add or change a network configuration
- * for a customer-managed VPC. For a failed workspace only, you can convert a workspace with
- * Databricks-managed VPC to use a customer-managed VPC by adding this ID. You cannot downgrade a
- * workspace with a customer-managed VPC to be a Databricks-managed VPC. You can update the
- * network configuration for a failed or running workspace to add PrivateLink support, though you
- * must also add a private access settings object. - Key configuration ID for managed services
- * (control plane storage, such as notebook source and Databricks SQL queries). Used only if you
- * use customer-managed keys for managed services. - Key configuration ID for workspace storage
- * (root S3 bucket and, optionally, EBS volumes). Used only if you use customer-managed keys for
- * workspace storage. **Important**: If the workspace was ever in the running state, even if
- * briefly before becoming a failed workspace, you cannot add a new key configuration ID for
- * workspace storage. - Private access settings ID to add PrivateLink support. You can add or
- * update the private access settings ID to upgrade a workspace to add support for front-end,
- * back-end, or both types of connectivity. You cannot remove (downgrade) any existing front-end
- * or back-end PrivateLink support on a workspace. - Custom tags. Given you provide an empty
- * custom tags, the update would not be applied. - Network connectivity configuration ID to add
- * serverless stable IP support. You can add or update the network connectivity configuration ID
- * to ensure the workspace uses the same set of stable IP CIDR blocks to access your resources.
- * You cannot remove a network connectivity configuration from the workspace once attached, you
- * can only switch to another one.
- *
- * <p>After calling the `PATCH` operation to update the workspace configuration, make repeated
- * `GET` requests with the workspace ID and check the workspace status. The workspace is
- * successful if the status changes to `RUNNING`.
- *
- * <p>For information about how to create a new workspace with this API **including error
- * handling**, see [Create a new workspace using the Account API].
- *
- * <p>### Update a running workspace You can update a Databricks workspace configuration for
- * running workspaces for some fields, but not all fields. For a running workspace, this request
- * supports updating the following fields only: - Credential configuration ID - Network
- * configuration ID. Used only if you already use a customer-managed VPC. You cannot convert a
- * running workspace from a Databricks-managed VPC to a customer-managed VPC. You can use a
- * network configuration update in this API for a failed or running workspace to add support for
- * PrivateLink, although you also need to add a private access settings object. - Key
- * configuration ID for managed services (control plane storage, such as notebook source and
- * Databricks SQL queries). Databricks does not directly encrypt the data with the
- * customer-managed key (CMK). Databricks uses both the CMK and the Databricks managed key (DMK)
- * that is unique to your workspace to encrypt the Data Encryption Key (DEK). Databricks uses the
- * DEK to encrypt your workspace's managed services persisted data. If the workspace does not
- * already have a CMK for managed services, adding this ID enables managed services encryption for
- * new or updated data. Existing managed services data that existed before adding the key remains
- * not encrypted with the DEK until it is modified. If the workspace already has customer-managed
- * keys for managed services, this request rotates (changes) the CMK keys and the DEK is
- * re-encrypted with the DMK and the new CMK. - Key configuration ID for workspace storage (root
- * S3 bucket and, optionally, EBS volumes). You can set this only if the workspace does not
- * already have a customer-managed key configuration for workspace storage. - Private access
- * settings ID to add PrivateLink support. You can add or update the private access settings ID to
- * upgrade a workspace to add support for front-end, back-end, or both types of connectivity. You
- * cannot remove (downgrade) any existing front-end or back-end PrivateLink support on a
- * workspace. - Custom tags. Given you provide an empty custom tags, the update would not be
- * applied. - Network connectivity configuration ID to add serverless stable IP support. You can
- * add or update the network connectivity configuration ID to ensure the workspace uses the same
- * set of stable IP CIDR blocks to access your resources. You cannot remove a network connectivity
- * configuration from the workspace once attached, you can only switch to another one.
- *
- * <p>**Important**: To update a running workspace, your workspace must have no running compute
- * resources that run in your workspace's VPC in the Classic data plane. For example, stop all
- * all-purpose clusters, job clusters, pools with running clusters, and Classic SQL warehouses. If
- * you do not terminate all cluster instances in the workspace before calling this API, the
- * request will fail.
- *
- * <p>### Wait until changes take effect. After calling the `PATCH` operation to update the
- * workspace configuration, make repeated `GET` requests with the workspace ID and check the
- * workspace status and the status of the fields. * For workspaces with a Databricks-managed VPC,
- * the workspace status becomes `PROVISIONING` temporarily (typically under 20 minutes). If the
- * workspace update is successful, the workspace status changes to `RUNNING`. Note that you can
- * also check the workspace status in the [Account Console]. However, you cannot use or create
- * clusters for another 20 minutes after that status change. This results in a total of up to 40
- * minutes in which you cannot create clusters. If you create or use clusters before this time
- * interval elapses, clusters do not launch successfully, fail, or could cause other unexpected
- * behavior. * For workspaces with a customer-managed VPC, the workspace status stays at status
- * `RUNNING` and the VPC change happens immediately. A change to the storage customer-managed key
- * configuration ID might take a few minutes to update, so continue to check the workspace until
- * you observe that it has been updated. If the update fails, the workspace might revert silently
- * to its original configuration. After the workspace has been updated, you cannot use or create
- * clusters for another 20 minutes. If you create or use clusters before this time interval
- * elapses, clusters do not launch successfully, fail, or could cause other unexpected behavior.
- *
- * <p>If you update the _storage_ customer-managed key configurations, it takes 20 minutes for the
- * changes to fully take effect. During the 20 minute wait, it is important that you stop all REST
- * API calls to the DBFS API. If you are modifying _only the managed services key configuration_,
- * you can omit the 20 minute wait.
- *
- * <p>**Important**: Customer-managed keys and customer-managed VPCs are supported by only some
- * deployment types and subscription types. If you have questions about availability, contact your
- * Databricks representative.
- *
- * <p>This operation is available only if your account is on the E2 version of the platform or on
- * a select custom plan that allows multiple workspaces per account.
- *
- * <p>[Account Console]:
- * https://docs.databricks.com/administration-guide/account-settings-e2/account-console-e2.html
- * [Create a new workspace using the Account API]:
- * http://docs.databricks.com/administration-guide/account-api/new-workspace.html
- */
- public Wait<Workspace, Void> update(UpdateWorkspaceRequest request) {
- impl.update(request);
+ /** Updates a workspace. */
+ public Wait<Workspace, Workspace> update(UpdateWorkspaceRequest request) {
+ Workspace response = impl.update(request);
return new Wait<>(
(timeout, callback) ->
- waitGetWorkspaceRunning(request.getWorkspaceId(), timeout, callback));
+ waitGetWorkspaceRunning(response.getWorkspaceId(), timeout, callback),
+ response);
}
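Note the behavioral shift in this hunk: the waiter now polls using the workspace ID echoed back in the PATCH response rather than the one on the request. A minimal sketch of consuming the new signature; the workspace ID is a placeholder and the fields being changed are left to the caller:

```java
import com.databricks.sdk.AccountClient;
import com.databricks.sdk.service.provisioning.UpdateWorkspaceRequest;
import com.databricks.sdk.service.provisioning.Workspace;
import com.databricks.sdk.support.Wait;
import java.time.Duration;

AccountClient account = new AccountClient();
UpdateWorkspaceRequest request =
    new UpdateWorkspaceRequest().setWorkspaceId(123456789L); // placeholder ID
// ... set the configuration fields being changed on the request ...

// update() applies the PATCH, then the waiter polls waitGetWorkspaceRunning
// with the ID from the response until the workspace is RUNNING again.
Wait<Workspace, Workspace> waiter = account.workspaces().update(request);
Workspace running = waiter.get(Duration.ofMinutes(30));
```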
public WorkspacesService impl() {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspacesImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspacesImpl.java
index 61557b7dd..f07945f8c 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspacesImpl.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspacesImpl.java
@@ -32,7 +32,7 @@ public Workspace create(CreateWorkspaceRequest request) {
}
@Override
- public void delete(DeleteWorkspaceRequest request) {
+ public Workspace delete(DeleteWorkspaceRequest request) {
String path =
String.format(
"/api/2.0/accounts/%s/workspaces/%s",
@@ -41,7 +41,7 @@ public void delete(DeleteWorkspaceRequest request) {
Request req = new Request("DELETE", path);
ApiClient.setQuery(req, request);
req.withHeader("Accept", "application/json");
- apiClient.execute(req, Void.class);
+ return apiClient.execute(req, Workspace.class);
} catch (IOException e) {
throw new DatabricksException("IO error: " + e.getMessage(), e);
}
@@ -72,17 +72,18 @@ public Collection list() {
}
@Override
- public void update(UpdateWorkspaceRequest request) {
+ public Workspace update(UpdateWorkspaceRequest request) {
String path =
String.format(
"/api/2.0/accounts/%s/workspaces/%s",
apiClient.configuredAccountID(), request.getWorkspaceId());
try {
- Request req = new Request("PATCH", path, apiClient.serialize(request));
+ Request req =
+ new Request("PATCH", path, apiClient.serialize(request.getCustomerFacingWorkspace()));
ApiClient.setQuery(req, request);
req.withHeader("Accept", "application/json");
req.withHeader("Content-Type", "application/json");
- apiClient.execute(req, Void.class);
+ return apiClient.execute(req, Workspace.class);
} catch (IOException e) {
throw new DatabricksException("IO error: " + e.getMessage(), e);
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspacesService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspacesService.java
index 7679504a0..8388eeb18 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspacesService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/provisioning/WorkspacesService.java
@@ -20,154 +20,60 @@
@Generated
public interface WorkspacesService {
/**
- * Creates a new workspace.
- *
- * **Important**: This operation is asynchronous. A response with HTTP status code 200 means
- * the request has been accepted and is in progress, but does not mean that the workspace deployed
- * successfully and is running. The initial workspace status is typically `PROVISIONING`. Use the
- * workspace ID (`workspace_id`) field in the response to identify the new workspace and make
- * repeated `GET` requests with the workspace ID and check its status. The workspace becomes
- * available when the status changes to `RUNNING`.
- */
- Workspace create(CreateWorkspaceRequest createWorkspaceRequest);
-
- /**
- * Terminates and deletes a Databricks workspace. From an API perspective, deletion is immediate.
- * However, it might take a few minutes for all workspaces resources to be deleted, depending on
- * the size and number of workspace resources.
+ * Creates a new workspace using a credential configuration and a storage configuration, an
+ * optional network configuration (if using a customer-managed VPC), an optional managed services
+ * key configuration (if using customer-managed keys for managed services), and an optional
+ * storage key configuration (if using customer-managed keys for storage). The key configurations
+ * used for managed services and storage encryption can be the same or different.
+ *
+ * <p>Important: This operation is asynchronous. A response with HTTP status code 200 means the
+ * request has been accepted and is in progress, but does not mean that the workspace deployed
+ * successfully and is running. The initial workspace status is typically PROVISIONING. Use the
+ * workspace ID (workspace_id) field in the response to identify the new workspace and make
+ * repeated GET requests with the workspace ID and check its status. The workspace becomes
+ * available when the status changes to RUNNING.
+ *
+ * <p>You can share one customer-managed VPC with multiple workspaces in a single account. It is
+ * not required to create a new VPC for each workspace. However, you cannot reuse subnets or
+ * Security Groups between workspaces. If you plan to share one VPC with multiple workspaces, make
+ * sure you size your VPC and subnets accordingly. Because a Databricks Account API network
+ * configuration encapsulates this information, you cannot reuse a Databricks Account API network
+ * configuration across workspaces.
+ *
+ * <p>For information about how to create a new workspace with this API including error handling,
+ * see [Create a new workspace using the Account API].
+ *
+ * <p>Important: Customer-managed VPCs, PrivateLink, and customer-managed keys are supported on a
+ * limited set of deployment and subscription types. If you have questions about availability,
+ * contact your Databricks representative.
*
* <p>This operation is available only if your account is on the E2 version of the platform or on
* a select custom plan that allows multiple workspaces per account.
+ *
+ * <p>[Create a new workspace using the Account API]:
+ * http://docs.databricks.com/administration-guide/account-api/new-workspace.html
*/
- void delete(DeleteWorkspaceRequest deleteWorkspaceRequest);
+ Workspace create(CreateWorkspaceRequest createWorkspaceRequest);
+
+ /** Deletes a Databricks workspace, specified by ID. */
+ Workspace delete(DeleteWorkspaceRequest deleteWorkspaceRequest);
/**
* Gets information including status for a Databricks workspace, specified by ID. In the response,
* the `workspace_status` field indicates the current status. After initial workspace creation
* (which is asynchronous), make repeated `GET` requests with the workspace ID and check its
- * status. The workspace becomes available when the status changes to `RUNNING`.
- *
- * <p>For information about how to create a new workspace with this API **including error
- * handling**, see [Create a new workspace using the Account API].
- *
- * <p>This operation is available only if your account is on the E2 version of the platform or on
- * a select custom plan that allows multiple workspaces per account.
+ * status. The workspace becomes available when the status changes to `RUNNING`. For information
+ * about how to create a new workspace with this API **including error handling**, see [Create a
+ * new workspace using the Account API].
*
* <p>[Create a new workspace using the Account API]:
* http://docs.databricks.com/administration-guide/account-api/new-workspace.html
*/
Workspace get(GetWorkspaceRequest getWorkspaceRequest);
- /**
- * Gets a list of all workspaces associated with an account, specified by ID.
- *
- * <p>This operation is available only if your account is on the E2 version of the platform or on
- * a select custom plan that allows multiple workspaces per account.
- */
+ /** Lists Databricks workspaces for an account. */
Collection<Workspace> list();
- /**
- * Updates a workspace configuration for either a running workspace or a failed workspace. The
- * elements that can be updated varies between these two use cases.
- *
- * ### Update a failed workspace You can update a Databricks workspace configuration for failed
- * workspace deployment for some fields, but not all fields. For a failed workspace, this request
- * supports updates to the following fields only: - Credential configuration ID - Storage
- * configuration ID - Network configuration ID. Used only to add or change a network configuration
- * for a customer-managed VPC. For a failed workspace only, you can convert a workspace with
- * Databricks-managed VPC to use a customer-managed VPC by adding this ID. You cannot downgrade a
- * workspace with a customer-managed VPC to be a Databricks-managed VPC. You can update the
- * network configuration for a failed or running workspace to add PrivateLink support, though you
- * must also add a private access settings object. - Key configuration ID for managed services
- * (control plane storage, such as notebook source and Databricks SQL queries). Used only if you
- * use customer-managed keys for managed services. - Key configuration ID for workspace storage
- * (root S3 bucket and, optionally, EBS volumes). Used only if you use customer-managed keys for
- * workspace storage. **Important**: If the workspace was ever in the running state, even if
- * briefly before becoming a failed workspace, you cannot add a new key configuration ID for
- * workspace storage. - Private access settings ID to add PrivateLink support. You can add or
- * update the private access settings ID to upgrade a workspace to add support for front-end,
- * back-end, or both types of connectivity. You cannot remove (downgrade) any existing front-end
- * or back-end PrivateLink support on a workspace. - Custom tags. Given you provide an empty
- * custom tags, the update would not be applied. - Network connectivity configuration ID to add
- * serverless stable IP support. You can add or update the network connectivity configuration ID
- * to ensure the workspace uses the same set of stable IP CIDR blocks to access your resources.
- * You cannot remove a network connectivity configuration from the workspace once attached, you
- * can only switch to another one.
- *
- * <p>After calling the `PATCH` operation to update the workspace configuration, make repeated
- * `GET` requests with the workspace ID and check the workspace status. The workspace is
- * successful if the status changes to `RUNNING`.
- *
- * <p>For information about how to create a new workspace with this API **including error
- * handling**, see [Create a new workspace using the Account API].
- *
- * <p>### Update a running workspace You can update a Databricks workspace configuration for
- * running workspaces for some fields, but not all fields. For a running workspace, this request
- * supports updating the following fields only: - Credential configuration ID - Network
- * configuration ID. Used only if you already use a customer-managed VPC. You cannot convert a
- * running workspace from a Databricks-managed VPC to a customer-managed VPC. You can use a
- * network configuration update in this API for a failed or running workspace to add support for
- * PrivateLink, although you also need to add a private access settings object. - Key
- * configuration ID for managed services (control plane storage, such as notebook source and
- * Databricks SQL queries). Databricks does not directly encrypt the data with the
- * customer-managed key (CMK). Databricks uses both the CMK and the Databricks managed key (DMK)
- * that is unique to your workspace to encrypt the Data Encryption Key (DEK). Databricks uses the
- * DEK to encrypt your workspace's managed services persisted data. If the workspace does not
- * already have a CMK for managed services, adding this ID enables managed services encryption for
- * new or updated data. Existing managed services data that existed before adding the key remains
- * not encrypted with the DEK until it is modified. If the workspace already has customer-managed
- * keys for managed services, this request rotates (changes) the CMK keys and the DEK is
- * re-encrypted with the DMK and the new CMK. - Key configuration ID for workspace storage (root
- * S3 bucket and, optionally, EBS volumes). You can set this only if the workspace does not
- * already have a customer-managed key configuration for workspace storage. - Private access
- * settings ID to add PrivateLink support. You can add or update the private access settings ID to
- * upgrade a workspace to add support for front-end, back-end, or both types of connectivity. You
- * cannot remove (downgrade) any existing front-end or back-end PrivateLink support on a
- * workspace. - Custom tags. Given you provide an empty custom tags, the update would not be
- * applied. - Network connectivity configuration ID to add serverless stable IP support. You can
- * add or update the network connectivity configuration ID to ensure the workspace uses the same
- * set of stable IP CIDR blocks to access your resources. You cannot remove a network connectivity
- * configuration from the workspace once attached, you can only switch to another one.
- *
- * <p>**Important**: To update a running workspace, your workspace must have no running compute
- * resources that run in your workspace's VPC in the Classic data plane. For example, stop all
- * all-purpose clusters, job clusters, pools with running clusters, and Classic SQL warehouses. If
- * you do not terminate all cluster instances in the workspace before calling this API, the
- * request will fail.
- *
- * <p>### Wait until changes take effect. After calling the `PATCH` operation to update the
- * workspace configuration, make repeated `GET` requests with the workspace ID and check the
- * workspace status and the status of the fields. * For workspaces with a Databricks-managed VPC,
- * the workspace status becomes `PROVISIONING` temporarily (typically under 20 minutes). If the
- * workspace update is successful, the workspace status changes to `RUNNING`. Note that you can
- * also check the workspace status in the [Account Console]. However, you cannot use or create
- * clusters for another 20 minutes after that status change. This results in a total of up to 40
- * minutes in which you cannot create clusters. If you create or use clusters before this time
- * interval elapses, clusters do not launch successfully, fail, or could cause other unexpected
- * behavior. * For workspaces with a customer-managed VPC, the workspace status stays at status
- * `RUNNING` and the VPC change happens immediately. A change to the storage customer-managed key
- * configuration ID might take a few minutes to update, so continue to check the workspace until
- * you observe that it has been updated. If the update fails, the workspace might revert silently
- * to its original configuration. After the workspace has been updated, you cannot use or create
- * clusters for another 20 minutes. If you create or use clusters before this time interval
- * elapses, clusters do not launch successfully, fail, or could cause other unexpected behavior.
- *
- * <p>If you update the _storage_ customer-managed key configurations, it takes 20 minutes for the
- * changes to fully take effect. During the 20 minute wait, it is important that you stop all REST
- * API calls to the DBFS API. If you are modifying _only the managed services key configuration_,
- * you can omit the 20 minute wait.
- *
- * <p>**Important**: Customer-managed keys and customer-managed VPCs are supported by only some
- * deployment types and subscription types. If you have questions about availability, contact your
- * Databricks representative.
- *
- * <p>This operation is available only if your account is on the E2 version of the platform or on
- * a select custom plan that allows multiple workspaces per account.
- *
- * <p>[Account Console]:
- * https://docs.databricks.com/administration-guide/account-settings-e2/account-console-e2.html
- * [Create a new workspace using the Account API]:
- * http://docs.databricks.com/administration-guide/account-api/new-workspace.html
- */
- void update(UpdateWorkspaceRequest updateWorkspaceRequest);
+ /** Updates a workspace. */
+ Workspace update(UpdateWorkspaceRequest updateWorkspaceRequest);
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServingEndpointsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServingEndpointsAPI.java
index ce08dd45a..509c6bbf1 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServingEndpointsAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServingEndpointsAPI.java
@@ -252,6 +252,12 @@ public Wait updateConfig(
response);
}
+ /** Updates the email and webhook notification settings for an endpoint. */
+ public UpdateInferenceEndpointNotificationsResponse updateNotifications(
+ UpdateInferenceEndpointNotifications request) {
+ return impl.updateNotifications(request);
+ }
+
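A minimal sketch of exercising the new notifications endpoint, assuming a configured WorkspaceClient; the endpoint name is a placeholder, and the EmailNotifications recipient fields are left to the caller since that type is defined elsewhere in this package:

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.serving.EmailNotifications;
import com.databricks.sdk.service.serving.UpdateInferenceEndpointNotifications;
import com.databricks.sdk.service.serving.UpdateInferenceEndpointNotificationsResponse;

WorkspaceClient w = new WorkspaceClient();
EmailNotifications emails = new EmailNotifications(); // populate recipient lists as needed

// PATCH /api/2.0/serving-endpoints/{name}/notifications; the name is a path
// parameter (@JsonIgnore on the request class), not part of the JSON body.
UpdateInferenceEndpointNotificationsResponse resp = w.servingEndpoints()
    .updateNotifications(new UpdateInferenceEndpointNotifications()
        .setName("my-endpoint") // placeholder endpoint name
        .setEmailNotifications(emails));
```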
/**
* Updates the permissions on a serving endpoint. Serving endpoints can inherit permissions from
* their root object.
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServingEndpointsImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServingEndpointsImpl.java
index 65de06770..db7231266 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServingEndpointsImpl.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServingEndpointsImpl.java
@@ -270,6 +270,21 @@ public ServingEndpointDetailed updateConfig(EndpointCoreConfigInput request) {
}
}
+ @Override
+ public UpdateInferenceEndpointNotificationsResponse updateNotifications(
+ UpdateInferenceEndpointNotifications request) {
+ String path = String.format("/api/2.0/serving-endpoints/%s/notifications", request.getName());
+ try {
+ Request req = new Request("PATCH", path, apiClient.serialize(request));
+ ApiClient.setQuery(req, request);
+ req.withHeader("Accept", "application/json");
+ req.withHeader("Content-Type", "application/json");
+ return apiClient.execute(req, UpdateInferenceEndpointNotificationsResponse.class);
+ } catch (IOException e) {
+ throw new DatabricksException("IO error: " + e.getMessage(), e);
+ }
+ }
+
@Override
public ServingEndpointPermissions updatePermissions(ServingEndpointPermissionsRequest request) {
String path =
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServingEndpointsService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServingEndpointsService.java
index b3d0a60cc..b6c99591d 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServingEndpointsService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/ServingEndpointsService.java
@@ -99,6 +99,10 @@ ServingEndpointPermissions setPermissions(
*/
ServingEndpointDetailed updateConfig(EndpointCoreConfigInput endpointCoreConfigInput);
+ /** Updates the email and webhook notification settings for an endpoint. */
+ UpdateInferenceEndpointNotificationsResponse updateNotifications(
+ UpdateInferenceEndpointNotifications updateInferenceEndpointNotifications);
+
/**
* Updates the permissions on a serving endpoint. Serving endpoints can inherit permissions from
* their root object.
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/UpdateInferenceEndpointNotifications.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/UpdateInferenceEndpointNotifications.java
new file mode 100755
index 000000000..181da16f8
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/UpdateInferenceEndpointNotifications.java
@@ -0,0 +1,65 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.serving;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class UpdateInferenceEndpointNotifications {
+ /**
+ * The email notification settings to update. Specify email addresses to notify when endpoint
+ * state changes occur.
+ */
+ @JsonProperty("email_notifications")
+ private EmailNotifications emailNotifications;
+
+ /**
+ * The name of the serving endpoint whose notifications are being updated. This field is required.
+ */
+ @JsonIgnore private String name;
+
+ public UpdateInferenceEndpointNotifications setEmailNotifications(
+ EmailNotifications emailNotifications) {
+ this.emailNotifications = emailNotifications;
+ return this;
+ }
+
+ public EmailNotifications getEmailNotifications() {
+ return emailNotifications;
+ }
+
+ public UpdateInferenceEndpointNotifications setName(String name) {
+ this.name = name;
+ return this;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ UpdateInferenceEndpointNotifications that = (UpdateInferenceEndpointNotifications) o;
+ return Objects.equals(emailNotifications, that.emailNotifications)
+ && Objects.equals(name, that.name);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(emailNotifications, name);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(UpdateInferenceEndpointNotifications.class)
+ .add("emailNotifications", emailNotifications)
+ .add("name", name)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/UpdateInferenceEndpointNotificationsResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/UpdateInferenceEndpointNotificationsResponse.java
new file mode 100755
index 000000000..1ba0b7e66
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/serving/UpdateInferenceEndpointNotificationsResponse.java
@@ -0,0 +1,61 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.serving;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class UpdateInferenceEndpointNotificationsResponse {
+ /** */
+ @JsonProperty("email_notifications")
+ private EmailNotifications emailNotifications;
+
+ /** */
+ @JsonProperty("name")
+ private String name;
+
+ public UpdateInferenceEndpointNotificationsResponse setEmailNotifications(
+ EmailNotifications emailNotifications) {
+ this.emailNotifications = emailNotifications;
+ return this;
+ }
+
+ public EmailNotifications getEmailNotifications() {
+ return emailNotifications;
+ }
+
+ public UpdateInferenceEndpointNotificationsResponse setName(String name) {
+ this.name = name;
+ return this;
+ }
+
+ public String getName() {
+ return name;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ UpdateInferenceEndpointNotificationsResponse that =
+ (UpdateInferenceEndpointNotificationsResponse) o;
+ return Objects.equals(emailNotifications, that.emailNotifications)
+ && Objects.equals(name, that.name);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(emailNotifications, name);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(UpdateInferenceEndpointNotificationsResponse.class)
+ .add("emailNotifications", emailNotifications)
+ .add("name", name)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/BaseChunkInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/BaseChunkInfo.java
index 1a58b0485..2595fc462 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/BaseChunkInfo.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/BaseChunkInfo.java
@@ -7,10 +7,6 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
-/**
- * Describes metadata for a particular chunk, within a result set; this structure is used both
- * within a manifest, and when fetching individual chunk data or links.
- */
@Generated
public class BaseChunkInfo {
/**
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/CreateWarehouseRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/CreateWarehouseRequest.java
index 71850c011..2c29ba7cb 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/CreateWarehouseRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/CreateWarehouseRequest.java
@@ -7,14 +7,14 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
+/** Creates a new SQL warehouse. */
@Generated
public class CreateWarehouseRequest {
/**
* The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries)
* before it is automatically stopped.
*
- * Supported values: - Must be >= 0 mins for serverless warehouses - Must be == 0 or >= 10 mins
- * for non-serverless warehouses - 0 indicates no autostop.
+ * <p>Supported values: - Must be == 0 or >= 10 mins - 0 indicates no autostop.
*
* <p>Defaults to 120 mins
*/
@@ -59,7 +59,7 @@ public class CreateWarehouseRequest {
/**
* Maximum number of clusters that the autoscaler will create to handle concurrent queries.
*
- * <p>Supported values: - Must be >= min_num_clusters - Must be <= 30.
+ * <p>Supported values: - Must be >= min_num_clusters - Must be <= 40.
*
* <p>Defaults to min_clusters if unset.
*/
@@ -87,7 +87,7 @@ public class CreateWarehouseRequest {
@JsonProperty("name")
private String name;
- /** */
+ /** Configures whether the endpoint should use spot instances. */
@JsonProperty("spot_instance_policy")
private SpotInstancePolicy spotInstancePolicy;
@@ -100,7 +100,10 @@ public class CreateWarehouseRequest {
@JsonProperty("tags")
private EndpointTags tags;
- /** */
+ /**
+ * Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to
+ * `PRO` and also set the field `enable_serverless_compute` to `true`.
+ */
@JsonProperty("warehouse_type")
private CreateWarehouseRequestWarehouseType warehouseType;
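The constraints in these Javadoc updates compose into a typical creation call. A minimal sketch, assuming a configured WorkspaceClient; the warehouse name and sizing are placeholders:

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.sql.CreateWarehouseRequest;
import com.databricks.sdk.service.sql.CreateWarehouseRequestWarehouseType;
import com.databricks.sdk.service.sql.GetWarehouseResponse;

WorkspaceClient w = new WorkspaceClient();

// Serverless requires warehouse_type = PRO plus enable_serverless_compute = true;
// auto_stop_mins must be 0 (disabled) or >= 10, and max_num_clusters <= 40.
GetWarehouseResponse wh = w.warehouses()
    .create(new CreateWarehouseRequest()
        .setName("sdk-example-warehouse") // placeholder
        .setClusterSize("Small")
        .setAutoStopMins(10L)
        .setMinNumClusters(1L)
        .setMaxNumClusters(4L)
        .setWarehouseType(CreateWarehouseRequestWarehouseType.PRO)
        .setEnableServerlessCompute(true))
    .get(); // wait until the warehouse reaches RUNNING
```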
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/CreateWarehouseRequestWarehouseType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/CreateWarehouseRequestWarehouseType.java
index f0b104ca6..03a31eee6 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/CreateWarehouseRequestWarehouseType.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/CreateWarehouseRequestWarehouseType.java
@@ -4,10 +4,6 @@
import com.databricks.sdk.support.Generated;
-/**
- * Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO`
- * and also set the field `enable_serverless_compute` to `true`.
- */
@Generated
public enum CreateWarehouseRequestWarehouseType {
CLASSIC,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EditWarehouseRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EditWarehouseRequest.java
index d07e8e25b..251b1555b 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EditWarehouseRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EditWarehouseRequest.java
@@ -8,6 +8,11 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
+/**
+ * This is an incremental edit functionality, so all fields except id are optional. If a field is
+ * set, the corresponding configuration in the SQL warehouse is modified. If a field is unset, the
+ * existing configuration value in the SQL warehouse is retained. Thus, this API is not idempotent.
+ */
@Generated
public class EditWarehouseRequest {
/**
@@ -48,7 +53,7 @@ public class EditWarehouseRequest {
@JsonProperty("enable_photon")
private Boolean enablePhoton;
- /** Configures whether the warehouse should use serverless compute. */
+ /** Configures whether the warehouse should use serverless compute */
@JsonProperty("enable_serverless_compute")
private Boolean enableServerlessCompute;
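A one-field edit makes the incremental, non-idempotent semantics described in the new class Javadoc concrete. A minimal sketch, assuming a configured WorkspaceClient; the warehouse ID is a placeholder:

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.sql.EditWarehouseRequest;

WorkspaceClient w = new WorkspaceClient();

// Only the fields that are set change; everything else keeps its current value.
w.warehouses()
    .edit(new EditWarehouseRequest()
        .setId("<warehouse-id>") // placeholder; id is the one required field
        .setMaxNumClusters(8L))  // within the new <= 40 bound
    .get();                      // block until the warehouse is RUNNING again
```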
@@ -62,7 +67,7 @@ public class EditWarehouseRequest {
/**
* Maximum number of clusters that the autoscaler will create to handle concurrent queries.
*
- * <p>Supported values: - Must be >= min_num_clusters - Must be <= 30.
+ * <p>Supported values: - Must be >= min_num_clusters - Must be <= 40.
*
* <p>Defaults to min_clusters if unset.
*/
@@ -90,7 +95,7 @@ public class EditWarehouseRequest {
@JsonProperty("name")
private String name;
- /** */
+ /** Configures whether the endpoint should use spot instances. */
@JsonProperty("spot_instance_policy")
private SpotInstancePolicy spotInstancePolicy;
@@ -103,7 +108,10 @@ public class EditWarehouseRequest {
@JsonProperty("tags")
private EndpointTags tags;
- /** */
+ /**
+ * Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to
+ * `PRO` and also set the field `enable_serverless_compute` to `true`.
+ */
@JsonProperty("warehouse_type")
private EditWarehouseRequestWarehouseType warehouseType;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EditWarehouseRequestWarehouseType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EditWarehouseRequestWarehouseType.java
index 5c4337caa..b180bfd82 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EditWarehouseRequestWarehouseType.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EditWarehouseRequestWarehouseType.java
@@ -4,10 +4,6 @@
import com.databricks.sdk.support.Generated;
-/**
- * Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO`
- * and also set the field `enable_serverless_compute` to `true`.
- */
@Generated
public enum EditWarehouseRequestWarehouseType {
CLASSIC,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EndpointHealth.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EndpointHealth.java
index 243675c94..5caff457b 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EndpointHealth.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EndpointHealth.java
@@ -24,7 +24,7 @@ public class EndpointHealth {
@JsonProperty("message")
private String message;
- /** */
+ /** Health status of the endpoint. */
@JsonProperty("status")
private Status status;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EndpointInfo.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EndpointInfo.java
index 92868d39b..03ae34840 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EndpointInfo.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EndpointInfo.java
@@ -70,7 +70,7 @@ public class EndpointInfo {
/**
* Maximum number of clusters that the autoscaler will create to handle concurrent queries.
*
- * <p>Supported values: - Must be >= min_num_clusters - Must be <= 30.
+ * <p>Supported values: - Must be >= min_num_clusters - Must be <= 40.
*
* <p>Defaults to min_clusters if unset.
*/
@@ -110,11 +110,11 @@ public class EndpointInfo {
@JsonProperty("odbc_params")
private OdbcParams odbcParams;
- /** */
+ /** Configures whether the endpoint should use spot instances. */
@JsonProperty("spot_instance_policy")
private SpotInstancePolicy spotInstancePolicy;
- /** */
+ /** State of the endpoint. */
@JsonProperty("state")
private State state;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EndpointInfoWarehouseType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EndpointInfoWarehouseType.java
index 320369adf..498289a1d 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EndpointInfoWarehouseType.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/EndpointInfoWarehouseType.java
@@ -4,10 +4,6 @@
import com.databricks.sdk.support.Generated;
-/**
- * Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO`
- * and also set the field `enable_serverless_compute` to `true`.
- */
@Generated
public enum EndpointInfoWarehouseType {
CLASSIC,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ExecuteStatementRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ExecuteStatementRequest.java
index cc6231890..5aa72af6c 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ExecuteStatementRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ExecuteStatementRequest.java
@@ -15,7 +15,7 @@ public class ExecuteStatementRequest {
* data representations and might not match the final size in the requested `format`. If the
* result was truncated due to the byte limit, then `truncated` in the response is set to `true`.
* When using `EXTERNAL_LINKS` disposition, a default `byte_limit` of 100 GiB is applied if
- * `byte_limit` is not explcitly set.
+ * `byte_limit` is not explicitly set.
*/
@JsonProperty("byte_limit")
private Long byteLimit;
@@ -29,7 +29,29 @@ public class ExecuteStatementRequest {
@JsonProperty("catalog")
private String catalog;
- /** */
+ /**
+ * The fetch disposition provides two modes of fetching results: `INLINE` and `EXTERNAL_LINKS`.
+ *
+ * <p>Statements executed with `INLINE` disposition will return result data inline, in
+ * `JSON_ARRAY` format, in a series of chunks. If a given statement produces a result set with a
+ * size larger than 25 MiB, that statement execution is aborted, and no result set will be
+ * available.
+ *
+ * <p>**NOTE** Byte limits are computed based upon internal representations of the result set
+ * data, and might not match the sizes visible in JSON responses.
+ *
+ * <p>Statements executed with `EXTERNAL_LINKS` disposition will return result data as external
+ * links: URLs that point to cloud storage internal to the workspace. Using `EXTERNAL_LINKS`
+ * disposition allows statements to generate arbitrarily sized result sets for fetching up to 100
+ * GiB. The resulting links have two important properties:
+ *
+ * <p>1. They point to resources _external_ to the Databricks compute; therefore any associated
+ * authentication information (typically a personal access token, OAuth token, or similar) _must
+ * be removed_ when fetching from these links.
+ *
+ * <p>2. These are URLs with a specific expiration, indicated in the response. The behavior when
+ * attempting to use an expired link is cloud specific.
+ */
@JsonProperty("disposition")
private Disposition disposition;
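A minimal sketch of requesting the `EXTERNAL_LINKS` disposition documented above, assuming a configured WorkspaceClient; the warehouse ID and table are placeholders, and `var` sidesteps naming the response class, which has varied across SDK versions:

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.sql.Disposition;
import com.databricks.sdk.service.sql.ExecuteStatementRequest;
import com.databricks.sdk.service.sql.Format;

WorkspaceClient w = new WorkspaceClient();

// EXTERNAL_LINKS trades inline JSON for presigned cloud-storage URLs, lifting
// the 25 MiB inline cap to roughly 100 GiB of result data.
var resp = w.statementExecution()
    .executeStatement(new ExecuteStatementRequest()
        .setWarehouseId("<warehouse-id>") // placeholder
        .setStatement("SELECT * FROM samples.nyctaxi.trips LIMIT 100000")
        .setDisposition(Disposition.EXTERNAL_LINKS)
        .setFormat(Format.ARROW_STREAM)); // external links pair well with Arrow
```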
@@ -93,13 +115,13 @@ public class ExecuteStatementRequest {
*
* <p>For example, the following statement contains two parameters, `my_name` and `my_date`:
*
- * <p>SELECT * FROM my_table WHERE name = :my_name AND date = :my_date
+ * <p>``` SELECT * FROM my_table WHERE name = :my_name AND date = :my_date ```
*
* <p>The parameters can be passed in the request body as follows:
*
- * <p>{ ..., "statement": "SELECT * FROM my_table WHERE name = :my_name AND date = :my_date",
+ * <p>` { ..., "statement": "SELECT * FROM my_table WHERE name = :my_name AND date = :my_date",
* "parameters": [ { "name": "my_name", "value": "the name" }, { "name": "my_date", "value":
- * "2020-01-01", "type": "DATE" } ] }
+ * "2020-01-01", "type": "DATE" } ] } `
*
* <p>Currently, positional parameters denoted by a `?` marker are not supported by the Databricks
* SQL Statement Execution API.
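The named-parameter payload shown in that Javadoc maps directly onto the generated request types. A minimal sketch, assuming a configured WorkspaceClient; `my_table` and the warehouse ID are placeholders:

```java
import java.util.Arrays;
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.sql.ExecuteStatementRequest;
import com.databricks.sdk.service.sql.StatementParameterListItem;

WorkspaceClient w = new WorkspaceClient();

// Named :my_name / :my_date markers, matching the JSON example above;
// positional '?' markers are not supported.
var result = w.statementExecution()
    .executeStatement(new ExecuteStatementRequest()
        .setWarehouseId("<warehouse-id>") // placeholder
        .setStatement("SELECT * FROM my_table WHERE name = :my_name AND date = :my_date")
        .setParameters(Arrays.asList(
            new StatementParameterListItem().setName("my_name").setValue("the name"),
            new StatementParameterListItem()
                .setName("my_date").setValue("2020-01-01").setType("DATE"))));
```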
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ExternalLink.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ExternalLink.java
index 1b88216f2..569281981 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ExternalLink.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ExternalLink.java
@@ -28,7 +28,11 @@ public class ExternalLink {
@JsonProperty("expiration")
private String expiration;
- /** */
+ /**
+ * A URL pointing to a chunk of result data, hosted by an external service, with a short
+ * expiration time (<= 15 minutes). As this URL contains a temporary credential, it should be
+ * considered sensitive and the client should not expose this URL in a log.
+ */
@JsonProperty("external_link")
private String externalLink;
@@ -44,7 +48,7 @@ public class ExternalLink {
/**
* When fetching, provides the `chunk_index` for the _next_ chunk. If absent, indicates there are
* no more chunks. The next chunk can be fetched with a
- * :method:statementexecution/getStatementResultChunkN request.
+ * :method:statementexecution/getstatementresultchunkn request.
*/
@JsonProperty("next_chunk_index")
private Long nextChunkIndex;
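Chunk traversal via `next_chunk_index` might look like the following hedged sketch; the statement ID is a placeholder from a prior `EXTERNAL_LINKS` execution, and the two-argument `getStatementResultChunkN` convenience overload is assumed from the generated API surface:

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.sql.ExternalLink;
import com.databricks.sdk.service.sql.ResultData;

WorkspaceClient w = new WorkspaceClient();
String statementId = "<statement-id>"; // placeholder

// Follow next_chunk_index until it is absent (null); each chunk carries fresh
// presigned URLs, since individual links expire within roughly 15 minutes.
Long chunkIndex = 0L;
while (chunkIndex != null) {
  ResultData chunk = w.statementExecution().getStatementResultChunkN(statementId, chunkIndex);
  if (chunk.getExternalLinks() != null) {
    for (ExternalLink link : chunk.getExternalLinks()) {
      // Fetch link.getExternalLink() with a plain HTTP client: do NOT forward the
      // Databricks Authorization header, and keep the URL out of logs.
    }
  }
  chunkIndex = chunk.getNextChunkIndex(); // null means no more chunks
}
```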
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWarehouseResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWarehouseResponse.java
index e7cb13ba1..6a789a483 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWarehouseResponse.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWarehouseResponse.java
@@ -70,7 +70,7 @@ public class GetWarehouseResponse {
/**
* Maximum number of clusters that the autoscaler will create to handle concurrent queries.
*
- * <p>Supported values: - Must be >= min_num_clusters - Must be <= 30.
+ * <p>Supported values: - Must be >= min_num_clusters - Must be <= 40.
*
* <p>Defaults to min_clusters if unset.
*/
@@ -110,11 +110,11 @@ public class GetWarehouseResponse {
@JsonProperty("odbc_params")
private OdbcParams odbcParams;
- /** */
+ /** Configures whether the endpoint should use spot instances. */
@JsonProperty("spot_instance_policy")
private SpotInstancePolicy spotInstancePolicy;
- /** */
+ /** State of the endpoint. */
@JsonProperty("state")
private State state;
@@ -127,7 +127,10 @@ public class GetWarehouseResponse {
@JsonProperty("tags")
private EndpointTags tags;
- /** */
+ /**
+ * Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to
+ * `PRO` and also set the field `enable_serverless_compute` to `true`.
+ */
@JsonProperty("warehouse_type")
private GetWarehouseResponseWarehouseType warehouseType;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWarehouseResponseWarehouseType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWarehouseResponseWarehouseType.java
index 7e1ada451..ea11574db 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWarehouseResponseWarehouseType.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWarehouseResponseWarehouseType.java
@@ -4,10 +4,6 @@
import com.databricks.sdk.support.Generated;
-/**
- * Warehouse type: `PRO` or `CLASSIC`. If you want to use serverless compute, you must set to `PRO`
- * and also set the field `enable_serverless_compute` to `true`.
- */
@Generated
public enum GetWarehouseResponseWarehouseType {
CLASSIC,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWorkspaceWarehouseConfigResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWorkspaceWarehouseConfigResponse.java
index 17524f5d2..50913917d 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWorkspaceWarehouseConfigResponse.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWorkspaceWarehouseConfigResponse.java
@@ -25,6 +25,10 @@ public class GetWorkspaceWarehouseConfigResponse {
@JsonProperty("data_access_config")
private Collection<EndpointConfPair> dataAccessConfig;
+ /** Enable Serverless compute for SQL warehouses */
+ @JsonProperty("enable_serverless_compute")
+ private Boolean enableServerlessCompute;
+
/**
* List of Warehouse Types allowed in this workspace (limits allowed value of the type field in
* CreateWarehouse and EditWarehouse). Note: Some types cannot be disabled, they don't need to be
@@ -43,7 +47,10 @@ public class GetWorkspaceWarehouseConfigResponse {
@JsonProperty("google_service_account")
private String googleServiceAccount;
- /** AWS Only: Instance profile used to pass IAM role to the cluster */
+ /**
+ * AWS Only: The instance profile used to pass an IAM role to the SQL warehouses. This
+ * configuration is also applied to the workspace's serverless compute for notebooks and jobs.
+ */
@JsonProperty("instance_profile_arn")
private String instanceProfileArn;
@@ -83,6 +90,16 @@ public Collection getDataAccessConfig() {
return dataAccessConfig;
}
+ public GetWorkspaceWarehouseConfigResponse setEnableServerlessCompute(
+ Boolean enableServerlessCompute) {
+ this.enableServerlessCompute = enableServerlessCompute;
+ return this;
+ }
+
+ public Boolean getEnableServerlessCompute() {
+ return enableServerlessCompute;
+ }
+
public GetWorkspaceWarehouseConfigResponse setEnabledWarehouseTypes(
Collection<WarehouseTypePair> enabledWarehouseTypes) {
this.enabledWarehouseTypes = enabledWarehouseTypes;
@@ -148,6 +165,7 @@ public boolean equals(Object o) {
return Objects.equals(channel, that.channel)
&& Objects.equals(configParam, that.configParam)
&& Objects.equals(dataAccessConfig, that.dataAccessConfig)
+ && Objects.equals(enableServerlessCompute, that.enableServerlessCompute)
&& Objects.equals(enabledWarehouseTypes, that.enabledWarehouseTypes)
&& Objects.equals(globalParam, that.globalParam)
&& Objects.equals(googleServiceAccount, that.googleServiceAccount)
@@ -162,6 +180,7 @@ public int hashCode() {
channel,
configParam,
dataAccessConfig,
+ enableServerlessCompute,
enabledWarehouseTypes,
globalParam,
googleServiceAccount,
@@ -176,6 +195,7 @@ public String toString() {
.add("channel", channel)
.add("configParam", configParam)
.add("dataAccessConfig", dataAccessConfig)
+ .add("enableServerlessCompute", enableServerlessCompute)
.add("enabledWarehouseTypes", enabledWarehouseTypes)
.add("globalParam", globalParam)
.add("googleServiceAccount", googleServiceAccount)
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWorkspaceWarehouseConfigResponseSecurityPolicy.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWorkspaceWarehouseConfigResponseSecurityPolicy.java
index f7066b756..2abbb5e26 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWorkspaceWarehouseConfigResponseSecurityPolicy.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/GetWorkspaceWarehouseConfigResponseSecurityPolicy.java
@@ -4,7 +4,7 @@
import com.databricks.sdk.support.Generated;
-/** Security policy for warehouses */
+/** Security policy to be used for warehouses */
@Generated
public enum GetWorkspaceWarehouseConfigResponseSecurityPolicy {
DATA_ACCESS_CONTROL,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ListWarehousesRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ListWarehousesRequest.java
index 157451774..41bb6164a 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ListWarehousesRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ListWarehousesRequest.java
@@ -10,14 +10,48 @@
@Generated
public class ListWarehousesRequest {
+ /** The max number of warehouses to return. */
+ @JsonIgnore
+ @QueryParam("page_size")
+ private Long pageSize;
+
+ /**
+ * A page token, received from a previous `ListWarehouses` call. Provide this to retrieve the
+ * subsequent page; otherwise the first will be retrieved.
+ *
+ * When paginating, all other parameters provided to `ListWarehouses` must match the call that
+ * provided the page token.
+ */
+ @JsonIgnore
+ @QueryParam("page_token")
+ private String pageToken;
+
/**
- * Service Principal which will be used to fetch the list of warehouses. If not specified, the
- * user from the session header is used.
+ * Service Principal which will be used to fetch the list of endpoints. If not specified, SQL
+ * Gateway will use the user from the session header.
*/
@JsonIgnore
@QueryParam("run_as_user_id")
private Long runAsUserId;
+ public ListWarehousesRequest setPageSize(Long pageSize) {
+ this.pageSize = pageSize;
+ return this;
+ }
+
+ public Long getPageSize() {
+ return pageSize;
+ }
+
+ public ListWarehousesRequest setPageToken(String pageToken) {
+ this.pageToken = pageToken;
+ return this;
+ }
+
+ public String getPageToken() {
+ return pageToken;
+ }
+
public ListWarehousesRequest setRunAsUserId(Long runAsUserId) {
this.runAsUserId = runAsUserId;
return this;
@@ -32,16 +66,22 @@ public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ListWarehousesRequest that = (ListWarehousesRequest) o;
- return Objects.equals(runAsUserId, that.runAsUserId);
+ return Objects.equals(pageSize, that.pageSize)
+ && Objects.equals(pageToken, that.pageToken)
+ && Objects.equals(runAsUserId, that.runAsUserId);
}
@Override
public int hashCode() {
- return Objects.hash(runAsUserId);
+ return Objects.hash(pageSize, pageToken, runAsUserId);
}
@Override
public String toString() {
- return new ToStringer(ListWarehousesRequest.class).add("runAsUserId", runAsUserId).toString();
+ return new ToStringer(ListWarehousesRequest.class)
+ .add("pageSize", pageSize)
+ .add("pageToken", pageToken)
+ .add("runAsUserId", runAsUserId)
+ .toString();
}
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ListWarehousesResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ListWarehousesResponse.java
index 53eb8c282..8f2aed6f3 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ListWarehousesResponse.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ListWarehousesResponse.java
@@ -10,10 +10,26 @@
@Generated
public class ListWarehousesResponse {
+ /**
+ * A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted,
+ * there are no subsequent pages.
+ */
+ @JsonProperty("next_page_token")
+ private String nextPageToken;
+
/** A list of warehouses and their configurations. */
@JsonProperty("warehouses")
private Collection<EndpointInfo> warehouses;
+ public ListWarehousesResponse setNextPageToken(String nextPageToken) {
+ this.nextPageToken = nextPageToken;
+ return this;
+ }
+
+ public String getNextPageToken() {
+ return nextPageToken;
+ }
+
public ListWarehousesResponse setWarehouses(Collection<EndpointInfo> warehouses) {
this.warehouses = warehouses;
return this;
@@ -28,16 +44,20 @@ public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
ListWarehousesResponse that = (ListWarehousesResponse) o;
- return Objects.equals(warehouses, that.warehouses);
+ return Objects.equals(nextPageToken, that.nextPageToken)
+ && Objects.equals(warehouses, that.warehouses);
}
@Override
public int hashCode() {
- return Objects.hash(warehouses);
+ return Objects.hash(nextPageToken, warehouses);
}
@Override
public String toString() {
- return new ToStringer(ListWarehousesResponse.class).add("warehouses", warehouses).toString();
+ return new ToStringer(ListWarehousesResponse.class)
+ .add("nextPageToken", nextPageToken)
+ .add("warehouses", warehouses)
+ .toString();
}
}
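The new `page_size`/`page_token` pair supports an explicit pagination loop. A minimal sketch against the raw service layer, assuming a configured WorkspaceClient; note that the higher-level `warehouses().list(...)` Iterable would normally follow `next_page_token` on its own:

```java
import com.databricks.sdk.WorkspaceClient;
import com.databricks.sdk.service.sql.EndpointInfo;
import com.databricks.sdk.service.sql.ListWarehousesRequest;
import com.databricks.sdk.service.sql.ListWarehousesResponse;

WorkspaceClient w = new WorkspaceClient();

String pageToken = null;
do {
  ListWarehousesResponse page = w.warehouses().impl()
      .list(new ListWarehousesRequest().setPageSize(50L).setPageToken(pageToken));
  if (page.getWarehouses() != null) {
    for (EndpointInfo warehouse : page.getWarehouses()) {
      System.out.println(warehouse.getName());
    }
  }
  pageToken = page.getNextPageToken(); // absent (null) means no more pages
} while (pageToken != null);
```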
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ResultData.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ResultData.java
index be696df15..217e370f9 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ResultData.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/ResultData.java
@@ -8,6 +8,13 @@
import java.util.Collection;
import java.util.Objects;
+/**
+ * Contains the result data of a single chunk when using `INLINE` disposition. When using
+ * `EXTERNAL_LINKS` disposition, the array `external_links` is used instead to provide URLs to the
+ * result data in cloud storage. Exactly one of these alternatives is used. (While the
+ * `external_links` array prepares the API to return multiple links in a single response. Currently
+ * only a single link is returned.)
+ */
@Generated
public class ResultData {
/**
@@ -35,7 +42,7 @@ public class ResultData {
/**
* When fetching, provides the `chunk_index` for the _next_ chunk. If absent, indicates there are
* no more chunks. The next chunk can be fetched with a
- * :method:statementexecution/getStatementResultChunkN request.
+ * :method:statementexecution/getstatementresultchunkn request.
*/
@JsonProperty("next_chunk_index")
private Long nextChunkIndex;
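A minimal sketch of walking chunks via `next_chunk_index`, assuming a `WorkspaceClient` named `w` and an `INLINE`/`JSON_ARRAY` result whose rows are exposed through `getDataArray()`:

    static void readAllChunks(com.databricks.sdk.WorkspaceClient w, String statementId) {
      ResultData chunk = w.statementExecution().getStatementResultChunkN(statementId, 0);
      while (chunk != null) {
        if (chunk.getDataArray() != null) {
          chunk.getDataArray().forEach(System.out::println); // one entry per row
        }
        Long next = chunk.getNextChunkIndex();
        if (next == null) {
          break; // an absent index indicates there are no more chunks
        }
        chunk = w.statementExecution().getStatementResultChunkN(statementId, next);
      }
    }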
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/SetWorkspaceWarehouseConfigRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/SetWorkspaceWarehouseConfigRequest.java
index fed2b2a4e..9a19ee96b 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/SetWorkspaceWarehouseConfigRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/SetWorkspaceWarehouseConfigRequest.java
@@ -8,6 +8,12 @@
import java.util.Collection;
import java.util.Objects;
+/**
+ * Sets the workspace level warehouse configuration that is shared by all SQL warehouses in this
+ * workspace.
+ *
+ * This is idempotent.
+ */
@Generated
public class SetWorkspaceWarehouseConfigRequest {
/** Optional: Channel selection details */
@@ -25,6 +31,10 @@ public class SetWorkspaceWarehouseConfigRequest {
@JsonProperty("data_access_config")
private Collection<EndpointConfPair> dataAccessConfig;
+ /** Enable Serverless compute for SQL warehouses */
+ @JsonProperty("enable_serverless_compute")
+ private Boolean enableServerlessCompute;
+
/**
* List of Warehouse Types allowed in this workspace (limits allowed value of the type field in
* CreateWarehouse and EditWarehouse). Note: Some types cannot be disabled, they don't need to be
@@ -43,7 +53,10 @@ public class SetWorkspaceWarehouseConfigRequest {
@JsonProperty("google_service_account")
private String googleServiceAccount;
- /** AWS Only: Instance profile used to pass IAM role to the cluster */
+ /**
+ * AWS Only: The instance profile used to pass an IAM role to the SQL warehouses. This
+ * configuration is also applied to the workspace's serverless compute for notebooks and jobs.
+ */
@JsonProperty("instance_profile_arn")
private String instanceProfileArn;
@@ -83,6 +96,16 @@ public Collection getDataAccessConfig() {
return dataAccessConfig;
}
+ public SetWorkspaceWarehouseConfigRequest setEnableServerlessCompute(
+ Boolean enableServerlessCompute) {
+ this.enableServerlessCompute = enableServerlessCompute;
+ return this;
+ }
+
+ public Boolean getEnableServerlessCompute() {
+ return enableServerlessCompute;
+ }
+
public SetWorkspaceWarehouseConfigRequest setEnabledWarehouseTypes(
Collection<WarehouseTypePair> enabledWarehouseTypes) {
this.enabledWarehouseTypes = enabledWarehouseTypes;
@@ -148,6 +171,7 @@ public boolean equals(Object o) {
return Objects.equals(channel, that.channel)
&& Objects.equals(configParam, that.configParam)
&& Objects.equals(dataAccessConfig, that.dataAccessConfig)
+ && Objects.equals(enableServerlessCompute, that.enableServerlessCompute)
&& Objects.equals(enabledWarehouseTypes, that.enabledWarehouseTypes)
&& Objects.equals(globalParam, that.globalParam)
&& Objects.equals(googleServiceAccount, that.googleServiceAccount)
@@ -162,6 +186,7 @@ public int hashCode() {
channel,
configParam,
dataAccessConfig,
+ enableServerlessCompute,
enabledWarehouseTypes,
globalParam,
googleServiceAccount,
@@ -176,6 +201,7 @@ public String toString() {
.add("channel", channel)
.add("configParam", configParam)
.add("dataAccessConfig", dataAccessConfig)
+ .add("enableServerlessCompute", enableServerlessCompute)
.add("enabledWarehouseTypes", enabledWarehouseTypes)
.add("globalParam", globalParam)
.add("googleServiceAccount", googleServiceAccount)
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/SetWorkspaceWarehouseConfigRequestSecurityPolicy.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/SetWorkspaceWarehouseConfigRequestSecurityPolicy.java
index dcff39c66..87d99e715 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/SetWorkspaceWarehouseConfigRequestSecurityPolicy.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/SetWorkspaceWarehouseConfigRequestSecurityPolicy.java
@@ -4,7 +4,7 @@
import com.databricks.sdk.support.Generated;
-/** Security policy for warehouses */
+/** Security policy to be used for warehouses */
@Generated
public enum SetWorkspaceWarehouseConfigRequestSecurityPolicy {
DATA_ACCESS_CONTROL,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/SpotInstancePolicy.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/SpotInstancePolicy.java
index 2c6f25fb7..bd5ec7c05 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/SpotInstancePolicy.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/SpotInstancePolicy.java
@@ -4,7 +4,22 @@
import com.databricks.sdk.support.Generated;
-/** Configurations whether the warehouse should use spot instances. */
+/**
+ * EndpointSpotInstancePolicy configures whether the endpoint should use spot instances.
+ *
+ * The breakdown of how the EndpointSpotInstancePolicy converts to per cloud configurations is:
+ *
+ * <pre>
+ * +-------+--------------------------------------+--------------------------------+
+ * | Cloud | COST_OPTIMIZED                       | RELIABILITY_OPTIMIZED          |
+ * +-------+--------------------------------------+--------------------------------+
+ * | AWS   | On Demand Driver with Spot Executors | On Demand Driver and Executors |
+ * | AZURE | On Demand Driver and Executors       | On Demand Driver and Executors |
+ * +-------+--------------------------------------+--------------------------------+
+ * </pre>
+ *
+ * <p>While including "spot" in the enum name may limit the future extensibility of this field
+ * because it limits this enum to denoting "spot or not", this is the field that PM recommends
+ * after discussion with customers per SC-48783.
+ */
@Generated
public enum SpotInstancePolicy {
COST_OPTIMIZED,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/State.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/State.java
index 64dde2ce5..c6f05715e 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/State.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/State.java
@@ -4,7 +4,7 @@
import com.databricks.sdk.support.Generated;
-/** State of the warehouse */
+/** State of a warehouse. */
@Generated
public enum State {
DELETED,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementExecutionAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementExecutionAPI.java
index 16b6fa0b0..c47eed4aa 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementExecutionAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementExecutionAPI.java
@@ -33,18 +33,19 @@
* yet finished. This can be set to either `CONTINUE`, to fall back to asynchronous mode, or it can
* be set to `CANCEL`, which cancels the statement.
*
- * <p>In summary: - Synchronous mode - `wait_timeout=30s` and `on_wait_timeout=CANCEL` - The call
+ * <p>In summary: - **Synchronous mode** (`wait_timeout=30s` and `on_wait_timeout=CANCEL`): The call
* waits up to 30 seconds; if the statement execution finishes within this time, the result data is
* returned directly in the response. If the execution takes longer than 30 seconds, the execution
- * is canceled and the call returns with a `CANCELED` state. - Asynchronous mode - `wait_timeout=0s`
- * (`on_wait_timeout` is ignored) - The call doesn't wait for the statement to finish but returns
- * directly with a statement ID. The status of the statement execution can be polled by issuing
- * :method:statementexecution/getStatement with the statement ID. Once the execution has succeeded,
- * this call also returns the result and metadata in the response. - Hybrid mode (default) -
- * `wait_timeout=10s` and `on_wait_timeout=CONTINUE` - The call waits for up to 10 seconds; if the
- * statement execution finishes within this time, the result data is returned directly in the
- * response. If the execution takes longer than 10 seconds, a statement ID is returned. The
- * statement ID can be used to fetch status and results in the same way as in the asynchronous mode.
+ * is canceled and the call returns with a `CANCELED` state. - **Asynchronous mode**
+ * (`wait_timeout=0s` and `on_wait_timeout` is ignored): The call doesn't wait for the statement to
+ * finish but returns directly with a statement ID. The status of the statement execution can be
+ * polled by issuing :method:statementexecution/getStatement with the statement ID. Once the
+ * execution has succeeded, this call also returns the result and metadata in the response. -
+ * **[Default] Hybrid mode** (`wait_timeout=10s` and `on_wait_timeout=CONTINUE`): The call waits for
+ * up to 10 seconds; if the statement execution finishes within this time, the result data is
+ * returned directly in the response. If the execution takes longer than 10 seconds, a statement ID
+ * is returned. The statement ID can be used to fetch status and results in the same way as in the
+ * asynchronous mode.
*
* <p>Depending on the size, the result can be split into multiple chunks. If the statement
* execution is successful, the statement response contains a manifest and the first chunk of the
@@ -114,13 +115,61 @@ public StatementExecutionAPI(StatementExecutionService mock) {
/**
* Requests that an executing statement be canceled. Callers must poll for status to see the
- * terminal state.
+ * terminal state. The cancel response is empty; receiving a response indicates successful
+ * receipt of the request.
*/
public void cancelExecution(CancelExecutionRequest request) {
impl.cancelExecution(request);
}
- /** Execute a SQL statement */
+ /**
+ * Execute a SQL statement and optionally await its results for a specified time.
+ *
+ * <p>**Use case: small result sets with INLINE + JSON_ARRAY**
+ *
+ * <p>For flows that generate small and predictable result sets (<= 25 MiB), `INLINE` responses of
+ * `JSON_ARRAY` result data are typically the simplest way to execute and fetch result data.
+ *
+ * <p>**Use case: large result sets with EXTERNAL_LINKS**
+ *
+ * <p>Using `EXTERNAL_LINKS` to fetch result data allows you to fetch large result sets
+ * efficiently. The main differences from using `INLINE` disposition are that the result data is
+ * accessed with URLs, and that there are 3 supported formats: `JSON_ARRAY`, `ARROW_STREAM` and
+ * `CSV` compared to only `JSON_ARRAY` with `INLINE`.
+ *
+ * <p>**Presigned URLs**
+ *
+ * <p>External links point to data stored within your workspace's internal storage, in the form of
+ * a presigned URL. The URLs are valid for only a short period, <= 15 minutes. Alongside each
+ * `external_link` is an expiration field indicating the time at which the URL is no longer valid.
+ * In `EXTERNAL_LINKS` mode, chunks can be resolved and fetched multiple times and in parallel.
+ *
+ * <p>----
+ *
+ * <p>### **Warning: Databricks strongly recommends that you protect the URLs that are returned by
+ * the `EXTERNAL_LINKS` disposition.**
+ *
+ * <p>When you use the `EXTERNAL_LINKS` disposition, a short-lived, presigned URL is generated,
+ * which can be used to download the results directly from cloud storage. As a short-lived
+ * credential is embedded in this URL, you should protect the URL.
+ *
+ * <p>Because presigned URLs are already generated with embedded temporary credentials, you must
+ * not set an `Authorization` header in the download requests.
+ *
+ * <p>The `EXTERNAL_LINKS` disposition can be disabled upon request by creating a support case.
+ *
+ * <p>See also [Security best practices].
+ *
+ * <p>----
+ *
+ * <p>StatementResponse contains `statement_id` and `status`; other fields might be absent or
+ * present depending on context. If the SQL warehouse fails to execute the provided statement, a
+ * 200 response is returned with `status.state` set to `FAILED` (in contrast to a failure when
+ * accepting the request, which results in a non-200 response). Details of the error can be found
+ * at `status.error` in case of execution failures.
+ *
+ * <p>[Security best practices]:
+ * https://docs.databricks.com/sql/admin/sql-execution-tutorial.html#security-best-practices
+ */
public StatementResponse executeStatement(ExecuteStatementRequest request) {
return impl.executeStatement(request);
}
@@ -130,11 +179,13 @@ public StatementResponse getStatement(String statementId) {
}
/**
- * This request can be used to poll for the statement's status. When the `status.state` field is
- * `SUCCEEDED` it will also return the result manifest and the first chunk of the result data.
- * When the statement is in the terminal states `CANCELED`, `CLOSED` or `FAILED`, it returns HTTP
- * 200 with the state set. After at least 12 hours in terminal state, the statement is removed
- * from the warehouse and further calls will receive an HTTP 404 response.
+ * This request can be used to poll for the statement's status. StatementResponse contains
+ * `statement_id` and `status`; other fields might be absent or present depending on context. When
+ * the `status.state` field is `SUCCEEDED` it will also return the result manifest and the first
+ * chunk of the result data. When the statement is in the terminal states `CANCELED`, `CLOSED` or
+ * `FAILED`, it returns HTTP 200 with the state set. After at least 12 hours in terminal state,
+ * the statement is removed from the warehouse and further calls will receive an HTTP 404
+ * response.
*
* <p>**NOTE** This call currently might take up to 5 seconds to get the latest status and result.
*/
@@ -156,7 +207,8 @@ public ResultData getStatementResultChunkN(String statementId, long chunkIndex)
* request can be used to fetch subsequent chunks. The response structure is identical to the
* nested `result` element described in the :method:statementexecution/getStatement request, and
* similarly includes the `next_chunk_index` and `next_chunk_internal_link` fields for simple
- * iteration through the result set.
+ * iteration through the result set. Depending on `disposition`, the response returns chunks of
+ * data either inline or as links.
*/
public ResultData getStatementResultChunkN(GetStatementResultChunkNRequest request) {
return impl.getStatementResultChunkN(request);
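A minimal sketch of hybrid-mode execution with polling, assuming a `WorkspaceClient` named `w`; the setter names follow the generated `ExecuteStatementRequest`:

    static StatementResponse runAndWait(com.databricks.sdk.WorkspaceClient w, String warehouseId)
        throws InterruptedException {
      StatementResponse resp =
          w.statementExecution()
              .executeStatement(
                  new ExecuteStatementRequest()
                      .setWarehouseId(warehouseId)
                      .setStatement("SELECT 1")
                      .setWaitTimeout("10s")); // hybrid mode; CONTINUE on timeout is the default
      while (resp.getStatus().getState() == StatementState.PENDING
          || resp.getStatus().getState() == StatementState.RUNNING) {
        Thread.sleep(2000); // the doc notes status can lag by up to ~5 seconds
        resp = w.statementExecution().getStatement(resp.getStatementId());
      }
      return resp;
    }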
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementExecutionImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementExecutionImpl.java
index 4c321af78..763e09ac2 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementExecutionImpl.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementExecutionImpl.java
@@ -30,7 +30,7 @@ public void cancelExecution(CancelExecutionRequest request) {
@Override
public StatementResponse executeStatement(ExecuteStatementRequest request) {
- String path = "/api/2.0/sql/statements/";
+ String path = "/api/2.0/sql/statements";
try {
Request req = new Request("POST", path, apiClient.serialize(request));
ApiClient.setQuery(req, request);
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementExecutionService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementExecutionService.java
index 50fae0fc5..951af8946 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementExecutionService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementExecutionService.java
@@ -30,18 +30,19 @@
* yet finished. This can be set to either `CONTINUE`, to fall back to asynchronous mode, or it can
* be set to `CANCEL`, which cancels the statement.
*
- * <p>In summary: - Synchronous mode - `wait_timeout=30s` and `on_wait_timeout=CANCEL` - The call
+ * <p>In summary: - **Synchronous mode** (`wait_timeout=30s` and `on_wait_timeout=CANCEL`): The call
* waits up to 30 seconds; if the statement execution finishes within this time, the result data is
* returned directly in the response. If the execution takes longer than 30 seconds, the execution
- * is canceled and the call returns with a `CANCELED` state. - Asynchronous mode - `wait_timeout=0s`
- * (`on_wait_timeout` is ignored) - The call doesn't wait for the statement to finish but returns
- * directly with a statement ID. The status of the statement execution can be polled by issuing
- * :method:statementexecution/getStatement with the statement ID. Once the execution has succeeded,
- * this call also returns the result and metadata in the response. - Hybrid mode (default) -
- * `wait_timeout=10s` and `on_wait_timeout=CONTINUE` - The call waits for up to 10 seconds; if the
- * statement execution finishes within this time, the result data is returned directly in the
- * response. If the execution takes longer than 10 seconds, a statement ID is returned. The
- * statement ID can be used to fetch status and results in the same way as in the asynchronous mode.
+ * is canceled and the call returns with a `CANCELED` state. - **Asynchronous mode**
+ * (`wait_timeout=0s` and `on_wait_timeout` is ignored): The call doesn't wait for the statement to
+ * finish but returns directly with a statement ID. The status of the statement execution can be
+ * polled by issuing :method:statementexecution/getStatement with the statement ID. Once the
+ * execution has succeeded, this call also returns the result and metadata in the response. -
+ * **[Default] Hybrid mode** (`wait_timeout=10s` and `on_wait_timeout=CONTINUE`): The call waits for
+ * up to 10 seconds; if the statement execution finishes within this time, the result data is
+ * returned directly in the response. If the execution takes longer than 10 seconds, a statement ID
+ * is returned. The statement ID can be used to fetch status and results in the same way as in the
+ * asynchronous mode.
*
* <p>Depending on the size, the result can be split into multiple chunks. If the statement
* execution is successful, the statement response contains a manifest and the first chunk of the
@@ -101,19 +102,69 @@
public interface StatementExecutionService {
/**
* Requests that an executing statement be canceled. Callers must poll for status to see the
- * terminal state.
+ * terminal state. The cancel response is empty; receiving a response indicates successful
+ * receipt of the request.
*/
void cancelExecution(CancelExecutionRequest cancelExecutionRequest);
- /** Execute a SQL statement */
+ /**
+ * Execute a SQL statement and optionally await its results for a specified time.
+ *
+ * <p>**Use case: small result sets with INLINE + JSON_ARRAY**
+ *
+ * <p>For flows that generate small and predictable result sets (<= 25 MiB), `INLINE` responses of
+ * `JSON_ARRAY` result data are typically the simplest way to execute and fetch result data.
+ *
+ * <p>**Use case: large result sets with EXTERNAL_LINKS**
+ *
+ * <p>Using `EXTERNAL_LINKS` to fetch result data allows you to fetch large result sets
+ * efficiently. The main differences from using `INLINE` disposition are that the result data is
+ * accessed with URLs, and that there are 3 supported formats: `JSON_ARRAY`, `ARROW_STREAM` and
+ * `CSV` compared to only `JSON_ARRAY` with `INLINE`.
+ *
+ * <p>**Presigned URLs**
+ *
+ * <p>External links point to data stored within your workspace's internal storage, in the form of
+ * a presigned URL. The URLs are valid for only a short period, <= 15 minutes. Alongside each
+ * `external_link` is an expiration field indicating the time at which the URL is no longer valid.
+ * In `EXTERNAL_LINKS` mode, chunks can be resolved and fetched multiple times and in parallel.
+ *
+ * <p>----
+ *
+ * <p>### **Warning: Databricks strongly recommends that you protect the URLs that are returned by
+ * the `EXTERNAL_LINKS` disposition.**
+ *
+ * <p>When you use the `EXTERNAL_LINKS` disposition, a short-lived, presigned URL is generated,
+ * which can be used to download the results directly from cloud storage. As a short-lived
+ * credential is embedded in this URL, you should protect the URL.
+ *
+ * <p>Because presigned URLs are already generated with embedded temporary credentials, you must
+ * not set an `Authorization` header in the download requests.
+ *
+ * <p>The `EXTERNAL_LINKS` disposition can be disabled upon request by creating a support case.
+ *
+ * <p>See also [Security best practices].
+ *
+ * <p>----
+ *
+ * <p>StatementResponse contains `statement_id` and `status`; other fields might be absent or
+ * present depending on context. If the SQL warehouse fails to execute the provided statement, a
+ * 200 response is returned with `status.state` set to `FAILED` (in contrast to a failure when
+ * accepting the request, which results in a non-200 response). Details of the error can be found
+ * at `status.error` in case of execution failures.
+ *
+ * <p>[Security best practices]:
+ * https://docs.databricks.com/sql/admin/sql-execution-tutorial.html#security-best-practices
+ */
StatementResponse executeStatement(ExecuteStatementRequest executeStatementRequest);
/**
- * This request can be used to poll for the statement's status. When the `status.state` field is
- * `SUCCEEDED` it will also return the result manifest and the first chunk of the result data.
- * When the statement is in the terminal states `CANCELED`, `CLOSED` or `FAILED`, it returns HTTP
- * 200 with the state set. After at least 12 hours in terminal state, the statement is removed
- * from the warehouse and further calls will receive an HTTP 404 response.
+ * This request can be used to poll for the statement's status. StatementResponse contains
+ * `statement_id` and `status`; other fields might be absent or present depending on context. When
+ * the `status.state` field is `SUCCEEDED` it will also return the result manifest and the first
+ * chunk of the result data. When the statement is in the terminal states `CANCELED`, `CLOSED` or
+ * `FAILED`, it returns HTTP 200 with the state set. After at least 12 hours in terminal state,
+ * the statement is removed from the warehouse and further calls will receive an HTTP 404
+ * response.
*
* <p>**NOTE** This call currently might take up to 5 seconds to get the latest status and result.
*/
@@ -126,7 +177,8 @@ public interface StatementExecutionService {
* request can be used to fetch subsequent chunks. The response structure is identical to the
* nested `result` element described in the :method:statementexecution/getStatement request, and
* similarly includes the `next_chunk_index` and `next_chunk_internal_link` fields for simple
- * iteration through the result set.
+ * iteration through the result set. Depending on `disposition`, the response returns chunks of
+ * data either inline or as links.
*/
ResultData getStatementResultChunkN(
GetStatementResultChunkNRequest getStatementResultChunkNRequest);
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementState.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementState.java
index 87cf79688..7d1d7a62c 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementState.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementState.java
@@ -4,21 +4,12 @@
import com.databricks.sdk.support.Generated;
-/**
- * Statement execution state: - `PENDING`: waiting for warehouse - `RUNNING`: running - `SUCCEEDED`:
- * execution was successful, result data available for fetch - `FAILED`: execution failed; reason
- * for failure described in accomanying error message - `CANCELED`: user canceled; can come from
- * explicit cancel call, or timeout with `on_wait_timeout=CANCEL` - `CLOSED`: execution successful,
- * and statement closed; result no longer available for fetch
- */
@Generated
public enum StatementState {
- CANCELED, // user canceled; can come from explicit cancel call, or timeout with
- // `on_wait_timeout=CANCEL`
- CLOSED, // execution successful, and statement closed; result no longer available for
- // fetch
- FAILED, // execution failed; reason for failure described in accomanying error message
- PENDING, // waiting for warehouse
- RUNNING, // running
- SUCCEEDED, // execution was successful, result data available for fetch
+ CANCELED,
+ CLOSED,
+ FAILED,
+ PENDING,
+ RUNNING,
+ SUCCEEDED,
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementStatus.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementStatus.java
index ddbfd8aa0..0fd5f703d 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementStatus.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/StatementStatus.java
@@ -14,7 +14,13 @@ public class StatementStatus {
@JsonProperty("error")
private ServiceError error;
- /** */
+ /**
+ * Statement execution state: - `PENDING`: waiting for warehouse - `RUNNING`: running -
+ * `SUCCEEDED`: execution was successful, result data available for fetch - `FAILED`: execution
+ * failed; reason for failure described in accompanying error message - `CANCELED`: user canceled;
+ * can come from explicit cancel call, or timeout with `on_wait_timeout=CANCEL` - `CLOSED`:
+ * execution successful, and statement closed; result no longer available for fetch
+ */
@JsonProperty("state")
private StatementState state;
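A small illustrative helper mapping the documented states to caller behavior (type and accessor names come from this package; the helper itself is an assumption):

    static boolean isTerminal(StatementStatus status) {
      switch (status.getState()) {
        case SUCCEEDED:
        case FAILED: // failure details, if any, are in status.getError()
        case CANCELED:
        case CLOSED:
          return true;
        default:
          return false; // PENDING or RUNNING: keep polling
      }
    }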
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/Status.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/Status.java
index 6d33b75e1..9d1a89702 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/Status.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/Status.java
@@ -4,11 +4,9 @@
import com.databricks.sdk.support.Generated;
-/** Health status of the warehouse. */
@Generated
public enum Status {
DEGRADED,
FAILED,
HEALTHY,
- STATUS_UNSPECIFIED,
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/TerminationReasonCode.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/TerminationReasonCode.java
index 3ee502add..8bfe1b758 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/TerminationReasonCode.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/TerminationReasonCode.java
@@ -4,21 +4,36 @@
import com.databricks.sdk.support.Generated;
-/** status code indicating why the cluster was terminated */
+/** The status code indicating why the cluster was terminated */
@Generated
public enum TerminationReasonCode {
ABUSE_DETECTED,
+ ACCESS_TOKEN_FAILURE,
+ ALLOCATION_TIMEOUT,
+ ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY,
+ ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS,
+ ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS,
+ ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS,
+ ALLOCATION_TIMEOUT_NO_READY_CLUSTERS,
+ ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS,
+ ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS,
ATTACH_PROJECT_FAILURE,
AWS_AUTHORIZATION_FAILURE,
+ AWS_INACCESSIBLE_KMS_KEY_FAILURE,
+ AWS_INSTANCE_PROFILE_UPDATE_FAILURE,
AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE,
AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE,
+ AWS_INVALID_KEY_PAIR,
+ AWS_INVALID_KMS_KEY_STATE,
AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE,
AWS_REQUEST_LIMIT_EXCEEDED,
+ AWS_RESOURCE_QUOTA_EXCEEDED,
AWS_UNSUPPORTED_FAILURE,
AZURE_BYOK_KEY_PERMISSION_FAILURE,
AZURE_EPHEMERAL_DISK_FAILURE,
AZURE_INVALID_DEPLOYMENT_TEMPLATE,
AZURE_OPERATION_NOT_ALLOWED_EXCEPTION,
+ AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE,
AZURE_QUOTA_EXCEEDED_EXCEPTION,
AZURE_RESOURCE_MANAGER_THROTTLING,
AZURE_RESOURCE_PROVIDER_THROTTLING,
@@ -27,63 +42,148 @@ public enum TerminationReasonCode {
AZURE_VNET_CONFIGURATION_FAILURE,
BOOTSTRAP_TIMEOUT,
BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION,
+ BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG,
+ BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED,
+ BUDGET_POLICY_RESOLUTION_FAILURE,
+ CLOUD_ACCOUNT_POD_QUOTA_EXCEEDED,
+ CLOUD_ACCOUNT_SETUP_FAILURE,
+ CLOUD_OPERATION_CANCELLED,
CLOUD_PROVIDER_DISK_SETUP_FAILURE,
+ CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED,
CLOUD_PROVIDER_LAUNCH_FAILURE,
+ CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG,
CLOUD_PROVIDER_RESOURCE_STOCKOUT,
+ CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG,
CLOUD_PROVIDER_SHUTDOWN,
+ CLUSTER_OPERATION_THROTTLED,
+ CLUSTER_OPERATION_TIMEOUT,
COMMUNICATION_LOST,
CONTAINER_LAUNCH_FAILURE,
CONTROL_PLANE_REQUEST_FAILURE,
+ CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG,
DATABASE_CONNECTION_FAILURE,
+ DATA_ACCESS_CONFIG_CHANGED,
DBFS_COMPONENT_UNHEALTHY,
+ DISASTER_RECOVERY_REPLICATION,
+ DNS_RESOLUTION_ERROR,
+ DOCKER_CONTAINER_CREATION_EXCEPTION,
DOCKER_IMAGE_PULL_FAILURE,
+ DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION,
+ DOCKER_INVALID_OS_EXCEPTION,
+ DRIVER_DNS_RESOLUTION_FAILURE,
+ DRIVER_EVICTION,
+ DRIVER_LAUNCH_TIMEOUT,
+ DRIVER_NODE_UNREACHABLE,
+ DRIVER_OUT_OF_DISK,
+ DRIVER_OUT_OF_MEMORY,
+ DRIVER_POD_CREATION_FAILURE,
+ DRIVER_UNEXPECTED_FAILURE,
+ DRIVER_UNHEALTHY,
DRIVER_UNREACHABLE,
DRIVER_UNRESPONSIVE,
+ DYNAMIC_SPARK_CONF_SIZE_EXCEEDED,
+ EOS_SPARK_IMAGE,
EXECUTION_COMPONENT_UNHEALTHY,
+ EXECUTOR_POD_UNSCHEDULED,
+ GCP_API_RATE_QUOTA_EXCEEDED,
+ GCP_DENIED_BY_ORG_POLICY,
+ GCP_FORBIDDEN,
+ GCP_IAM_TIMEOUT,
+ GCP_INACCESSIBLE_KMS_KEY_FAILURE,
+ GCP_INSUFFICIENT_CAPACITY,
+ GCP_IP_SPACE_EXHAUSTED,
+ GCP_KMS_KEY_PERMISSION_DENIED,
+ GCP_NOT_FOUND,
GCP_QUOTA_EXCEEDED,
+ GCP_RESOURCE_QUOTA_EXCEEDED,
+ GCP_SERVICE_ACCOUNT_ACCESS_DENIED,
GCP_SERVICE_ACCOUNT_DELETED,
+ GCP_SERVICE_ACCOUNT_NOT_FOUND,
+ GCP_SUBNET_NOT_READY,
+ GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED,
+ GKE_BASED_CLUSTER_TERMINATION,
GLOBAL_INIT_SCRIPT_FAILURE,
HIVE_METASTORE_PROVISIONING_FAILURE,
IMAGE_PULL_PERMISSION_DENIED,
INACTIVITY,
+ INIT_CONTAINER_NOT_FINISHED,
INIT_SCRIPT_FAILURE,
INSTANCE_POOL_CLUSTER_FAILURE,
+ INSTANCE_POOL_MAX_CAPACITY_REACHED,
+ INSTANCE_POOL_NOT_FOUND,
INSTANCE_UNREACHABLE,
+ INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG,
+ INTERNAL_CAPACITY_FAILURE,
INTERNAL_ERROR,
INVALID_ARGUMENT,
+ INVALID_AWS_PARAMETER,
+ INVALID_INSTANCE_PLACEMENT_PROTOCOL,
INVALID_SPARK_IMAGE,
+ INVALID_WORKER_IMAGE_FAILURE,
+ IN_PENALTY_BOX,
IP_EXHAUSTION_FAILURE,
JOB_FINISHED,
+ K8S_ACTIVE_POD_QUOTA_EXCEEDED,
K8S_AUTOSCALING_FAILURE,
K8S_DBR_CLUSTER_LAUNCH_TIMEOUT,
+ LAZY_ALLOCATION_TIMEOUT,
+ MAINTENANCE_MODE,
METASTORE_COMPONENT_UNHEALTHY,
NEPHOS_RESOURCE_MANAGEMENT,
+ NETVISOR_SETUP_TIMEOUT,
+ NETWORK_CHECK_CONTROL_PLANE_FAILURE,
+ NETWORK_CHECK_DNS_SERVER_FAILURE,
+ NETWORK_CHECK_METADATA_ENDPOINT_FAILURE,
+ NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE,
+ NETWORK_CHECK_NIC_FAILURE,
+ NETWORK_CHECK_STORAGE_FAILURE,
NETWORK_CONFIGURATION_FAILURE,
NFS_MOUNT_FAILURE,
+ NO_ACTIVATED_K8S,
+ NO_ACTIVATED_K8S_TESTING_TAG,
+ NO_MATCHED_K8S,
+ NO_MATCHED_K8S_TESTING_TAG,
NPIP_TUNNEL_SETUP_FAILURE,
NPIP_TUNNEL_TOKEN_FAILURE,
+ POD_ASSIGNMENT_FAILURE,
+ POD_SCHEDULING_FAILURE,
REQUEST_REJECTED,
REQUEST_THROTTLED,
+ RESOURCE_USAGE_BLOCKED,
+ SECRET_CREATION_FAILURE,
+ SECRET_PERMISSION_DENIED,
SECRET_RESOLUTION_ERROR,
+ SECURITY_AGENTS_FAILED_INITIAL_VERIFICATION,
SECURITY_DAEMON_REGISTRATION_EXCEPTION,
SELF_BOOTSTRAP_FAILURE,
+ SERVERLESS_LONG_RUNNING_TERMINATED,
SKIPPED_SLOW_NODES,
SLOW_IMAGE_DOWNLOAD,
SPARK_ERROR,
SPARK_IMAGE_DOWNLOAD_FAILURE,
+ SPARK_IMAGE_DOWNLOAD_THROTTLED,
+ SPARK_IMAGE_NOT_FOUND,
SPARK_STARTUP_FAILURE,
SPOT_INSTANCE_TERMINATION,
+ SSH_BOOTSTRAP_FAILURE,
STORAGE_DOWNLOAD_FAILURE,
+ STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG,
+ STORAGE_DOWNLOAD_FAILURE_SLOW,
+ STORAGE_DOWNLOAD_FAILURE_THROTTLED,
STS_CLIENT_SETUP_FAILURE,
SUBNET_EXHAUSTED_FAILURE,
TEMPORARILY_UNAVAILABLE,
TRIAL_EXPIRED,
UNEXPECTED_LAUNCH_FAILURE,
+ UNEXPECTED_POD_RECREATION,
UNKNOWN,
UNSUPPORTED_INSTANCE_TYPE,
UPDATE_INSTANCE_PROFILE_FAILURE,
+ USAGE_POLICY_ENTITLEMENT_DENIED,
+ USER_INITIATED_VM_TERMINATION,
USER_REQUEST,
WORKER_SETUP_FAILURE,
WORKSPACE_CANCELLED_ERROR,
WORKSPACE_CONFIGURATION_ERROR,
+ WORKSPACE_UPDATE,
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehouseTypePair.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehouseTypePair.java
index 562866b2e..bf221ba27 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehouseTypePair.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehouseTypePair.java
@@ -7,6 +7,10 @@
import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Objects;
+/**
+ * Configuration values to enable or disable access to specific warehouse types in the
+ * workspace.
+ */
@Generated
public class WarehouseTypePair {
/**
@@ -16,7 +20,7 @@ public class WarehouseTypePair {
@JsonProperty("enabled")
private Boolean enabled;
- /** Warehouse type: `PRO` or `CLASSIC`. */
+ /** */
@JsonProperty("warehouse_type")
private WarehouseTypePairWarehouseType warehouseType;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehouseTypePairWarehouseType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehouseTypePairWarehouseType.java
index a0d6f8870..563e75240 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehouseTypePairWarehouseType.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehouseTypePairWarehouseType.java
@@ -4,7 +4,6 @@
import com.databricks.sdk.support.Generated;
-/** Warehouse type: `PRO` or `CLASSIC`. */
@Generated
public enum WarehouseTypePairWarehouseType {
CLASSIC,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehousesAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehousesAPI.java
index 2382015a1..3253ff69d 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehousesAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WarehousesAPI.java
@@ -189,7 +189,16 @@ public GetWorkspaceWarehouseConfigResponse getWorkspaceWarehouseConfig() {
/** Lists all SQL warehouses that a user has access to. */
public Iterable<EndpointInfo> list(ListWarehousesRequest request) {
return new Paginator<>(
- request, impl::list, ListWarehousesResponse::getWarehouses, response -> null);
+ request,
+ impl::list,
+ ListWarehousesResponse::getWarehouses,
+ response -> {
+ String token = response.getNextPageToken();
+ if (token == null || token.isEmpty()) {
+ return null;
+ }
+ return request.setPageToken(token);
+ });
}
/**
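With the `Paginator` change above, call sites are unchanged and the token plumbing is transparent. A hedged usage sketch (`w` is an assumed `WorkspaceClient`):

    for (EndpointInfo warehouse : w.warehouses().list(new ListWarehousesRequest())) {
      System.out.println(warehouse.getName());
    }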
diff --git a/tagging.py b/tagging.py
index 5504bdd0e..f2ac65457 100644
--- a/tagging.py
+++ b/tagging.py
@@ -51,8 +51,7 @@ def commit_and_push(self, message: str):
new_tree = self.repo.create_git_tree(self.changed_files, base_tree)
parent_commit = self.repo.get_git_commit(head_ref.object.sha)
- new_commit = self.repo.create_git_commit(
- message=message, tree=new_tree, parents=[parent_commit])
+ new_commit = self.repo.create_git_commit(message=message, tree=new_tree, parents=[parent_commit])
# Update branch reference
head_ref.edit(new_commit.sha)
self.sha = new_commit.sha
@@ -70,11 +69,10 @@ def tag(self, tag_name: str, tag_message: str):
# The email MUST be the GitHub Apps email.
# Otherwise, the tag will not be verified.
tagger = InputGitAuthor(
- name="Databricks SDK Release Bot",
- email="DECO-SDK-Tagging[bot]@users.noreply.github.com")
+ name="Databricks SDK Release Bot", email="DECO-SDK-Tagging[bot]@users.noreply.github.com"
+ )
- tag = self.repo.create_git_tag(
- tag=tag_name, message=tag_message, object=self.sha, type="commit", tagger=tagger)
+ tag = self.repo.create_git_tag(tag=tag_name, message=tag_message, object=self.sha, type="commit", tagger=tagger)
# Create a Git ref (the actual reference for the tag in the repo)
self.repo.create_git_ref(ref=f"refs/tags/{tag_name}", sha=tag.sha)
@@ -89,6 +87,7 @@ class Package:
:name: The package name.
:path: The path to the package relative to the repository root.
"""
+
name: str
path: str
@@ -140,7 +139,7 @@ def get_package_name(package_path: str) -> str:
}
"""
filepath = os.path.join(os.getcwd(), package_path, PACKAGE_FILE_NAME)
- with open(filepath, 'r') as file:
+ with open(filepath, "r") as file:
content = json.load(file)
if "package" in content:
return content["package"]
@@ -156,21 +155,21 @@ def update_version_references(tag_info: TagInfo) -> None:
# Load version patterns from '.codegen.json' file at the top level of the repository
package_file_path = os.path.join(os.getcwd(), CODEGEN_FILE_NAME)
- with open(package_file_path, 'r') as file:
+ with open(package_file_path, "r") as file:
package_file = json.load(file)
- version = package_file.get('version')
+ version = package_file.get("version")
if not version:
- print(f"`version` not found in .codegen.json. Nothing to update.")
+ print("`version` not found in .codegen.json. Nothing to update.")
return
# Update the versions
for filename, pattern in version.items():
loc = os.path.join(os.getcwd(), tag_info.package.path, filename)
- previous_version = re.sub(r'\$VERSION', r"\\d+\\.\\d+\\.\\d+", pattern)
- new_version = re.sub(r'\$VERSION', tag_info.version, pattern)
+ previous_version = re.sub(r"\$VERSION", r"\\d+\\.\\d+\\.\\d+", pattern)
+ new_version = re.sub(r"\$VERSION", tag_info.version, pattern)
- with open(loc, 'r') as file:
+ with open(loc, "r") as file:
content = file.read()
# Replace the version in the file content
@@ -188,15 +187,15 @@ def clean_next_changelog(package_path: str) -> None:
"""
file_path = os.path.join(os.getcwd(), package_path, NEXT_CHANGELOG_FILE_NAME)
- with open(file_path, 'r') as file:
+ with open(file_path, "r") as file:
content = file.read()
# Remove content between ### sections
- cleaned_content = re.sub(r'(### [^\n]+\n)(?:.*?\n?)*?(?=###|$)', r'\1', content)
+ cleaned_content = re.sub(r"(### [^\n]+\n)(?:.*?\n?)*?(?=###|$)", r"\1", content)
# Ensure there is exactly one empty line before each section
- cleaned_content = re.sub(r'(\n*)(###[^\n]+)', r'\n\n\2', cleaned_content)
+ cleaned_content = re.sub(r"(\n*)(###[^\n]+)", r"\n\n\2", cleaned_content)
# Find the version number
- version_match = re.search(r'Release v(\d+)\.(\d+)\.(\d+)', cleaned_content)
+ version_match = re.search(r"Release v(\d+)\.(\d+)\.(\d+)", cleaned_content)
if not version_match:
raise Exception("Version not found in the changelog")
major, minor, patch = map(int, version_match.groups())
@@ -206,7 +205,7 @@ def clean_next_changelog(package_path: str) -> None:
# are more common than patch or major version releases.
minor += 1
patch = 0
- new_version = f'Release v{major}.{minor}.{patch}'
+ new_version = f"Release v{major}.{minor}.{patch}"
cleaned_content = cleaned_content.replace(version_match.group(0), new_version)
# Update file with cleaned content
@@ -220,19 +219,18 @@ def get_previous_tag_info(package: Package) -> Optional[TagInfo]:
"""
changelog_path = os.path.join(os.getcwd(), package.path, CHANGELOG_FILE_NAME)
- with open(changelog_path, 'r') as f:
+ with open(changelog_path, "r") as f:
changelog = f.read()
# Extract the latest release section using regex
- match = re.search(r"## (\[Release\] )?Release v[\d\.]+.*?(?=\n## (\[Release\] )?Release v|\Z)",
- changelog, re.S)
+ match = re.search(r"## (\[Release\] )?Release v[\d\.]+.*?(?=\n## (\[Release\] )?Release v|\Z)", changelog, re.S)
# E.g., for new packages.
if not match:
return None
latest_release = match.group(0)
- version_match = re.search(r'## (\[Release\] )?Release v(\d+\.\d+\.\d+)', latest_release)
+ version_match = re.search(r"## (\[Release\] )?Release v(\d+\.\d+\.\d+)", latest_release)
if not version_match:
raise Exception("Version not found in the changelog")
@@ -247,22 +245,22 @@ def get_next_tag_info(package: Package) -> Optional[TagInfo]:
"""
next_changelog_path = os.path.join(os.getcwd(), package.path, NEXT_CHANGELOG_FILE_NAME)
# Read NEXT_CHANGELOG.md
- with open(next_changelog_path, 'r') as f:
+ with open(next_changelog_path, "r") as f:
next_changelog = f.read()
# Remove "# NEXT CHANGELOG" line
- next_changelog = re.sub(r'^# NEXT CHANGELOG(\n+)', '', next_changelog, flags=re.MULTILINE)
+ next_changelog = re.sub(r"^# NEXT CHANGELOG(\n+)", "", next_changelog, flags=re.MULTILINE)
# Remove empty sections
- next_changelog = re.sub(r'###[^\n]+\n+(?=##|\Z)', '', next_changelog)
+ next_changelog = re.sub(r"###[^\n]+\n+(?=##|\Z)", "", next_changelog)
# Ensure there is exactly one empty line before each section
- next_changelog = re.sub(r'(\n*)(###[^\n]+)', r'\n\n\2', next_changelog)
+ next_changelog = re.sub(r"(\n*)(###[^\n]+)", r"\n\n\2", next_changelog)
- if not re.search(r'###', next_changelog):
+ if not re.search(r"###", next_changelog):
print("All sections are empty. No changes will be made to the changelog.")
return None
- version_match = re.search(r'## Release v(\d+\.\d+\.\d+)', next_changelog)
+ version_match = re.search(r"## Release v(\d+\.\d+\.\d+)", next_changelog)
if not version_match:
raise Exception("Version not found in the changelog")
@@ -275,10 +273,9 @@ def write_changelog(tag_info: TagInfo) -> None:
Updates the changelog with a new tag info.
"""
changelog_path = os.path.join(os.getcwd(), tag_info.package.path, CHANGELOG_FILE_NAME)
- with open(changelog_path, 'r') as f:
+ with open(changelog_path, "r") as f:
changelog = f.read()
- updated_changelog = re.sub(r'(# Version changelog\n\n)', f'\\1{tag_info.content.strip()}\n\n\n',
- changelog)
+ updated_changelog = re.sub(r"(# Version changelog\n\n)", f"\\1{tag_info.content.strip()}\n\n\n", changelog)
gh.add_file(changelog_path, updated_changelog)
@@ -333,8 +330,7 @@ def is_tag_applied(tag: TagInfo) -> bool:
"""
try:
# Check if the specific tag exists
- result = subprocess.check_output(
- ['git', 'tag', '--list', tag.tag_name()], stderr=subprocess.PIPE, text=True)
+ result = subprocess.check_output(["git", "tag", "--list", tag.tag_name()], stderr=subprocess.PIPE, text=True)
return result.strip() == tag.tag_name()
except subprocess.CalledProcessError as e:
# Raise an exception for git command errors
@@ -349,10 +345,7 @@ def find_last_tags() -> List[TagInfo]:
"""
packages = find_packages()
- return [
- info for info in (get_previous_tag_info(package) for package in packages)
- if info is not None
- ]
+ return [info for info in (get_previous_tag_info(package) for package in packages) if info is not None]
def find_pending_tags() -> List[TagInfo]:
@@ -379,8 +372,9 @@ def generate_commit_message(tag_infos: List[TagInfo]) -> str:
# Sort tag_infos by package name for consistency
tag_infos.sort(key=lambda info: info.package.name)
- return 'Release\n\n' + '\n\n'.join(f"## {info.package.name}/v{info.version}\n\n{info.content}"
- for info in tag_infos)
+ return "Release\n\n" + "\n\n".join(
+ f"## {info.package.name}/v{info.version}\n\n{info.content}" for info in tag_infos
+ )
def push_changes(tag_infos: List[TagInfo]) -> None:
@@ -404,25 +398,24 @@ def reset_repository(hash: Optional[str] = None) -> None:
:param hash: The commit hash to reset to. If None, it resets to HEAD.
"""
# Fetch the latest changes from the remote repository
- subprocess.run(['git', 'fetch'])
+ subprocess.run(["git", "fetch"])
# Determine the commit hash (default to origin/main if none is provided)
- commit_hash = hash or 'origin/main'
+ commit_hash = hash or "origin/main"
# Reset in memory changed files and the commit hash
gh.reset(hash)
# Construct the Git reset command
- command = ['git', 'reset', '--hard', commit_hash]
+ command = ["git", "reset", "--hard", commit_hash]
# Execute the git reset command
subprocess.run(command, check=True)
-def retry_function(func: Callable[[], List[TagInfo]],
- cleanup: Callable[[], None],
- max_attempts: int = 5,
- delay: int = 5) -> List[TagInfo]:
+def retry_function(
+ func: Callable[[], List[TagInfo]], cleanup: Callable[[], None], max_attempts: int = 5, delay: int = 5
+) -> List[TagInfo]:
"""
Calls a function up to `max_attempts` times, retrying if an exception occurs.
@@ -451,9 +444,7 @@ def update_changelogs(packages: List[Package]) -> List[TagInfo]:
"""
Updates changelogs and pushes the commits.
"""
- tag_infos = [
- info for info in (process_package(package) for package in packages) if info is not None
- ]
+ tag_infos = [info for info in (process_package(package) for package in packages) if info is not None]
# If any package was changed, push the changes.
if tag_infos:
push_changes(tag_infos)
@@ -479,12 +470,12 @@ def run_command(command: List[str]) -> str:
def pull_last_release_commit() -> None:
"""
- Reset the repository to the last release.
+ Reset the repository to the last release.
Uses commit for last change to .release_metadata.json, since it's only updated on releases.
"""
commit_hash = subprocess.check_output(
- ['git', 'log', '-n', '1', '--format=%H', '--', '.release_metadata.json'],
- text=True).strip()
+ ["git", "log", "-n", "1", "--format=%H", "--", ".release_metadata.json"], text=True
+ ).strip()
# If no commit is found, raise an exception
if not commit_hash:
@@ -499,15 +490,15 @@ def get_package_from_args() -> Optional[str]:
Retrieves an optional package
python3 ./tagging.py --package
"""
- parser = argparse.ArgumentParser(description='Update changelogs and tag the release.')
- parser.add_argument('--package', '-p', type=str, help='Tag a single package')
+ parser = argparse.ArgumentParser(description="Update changelogs and tag the release.")
+ parser.add_argument("--package", "-p", type=str, help="Tag a single package")
args = parser.parse_args()
return args.package
def init_github():
- token = os.environ['GITHUB_TOKEN']
- repo_name = os.environ['GITHUB_REPOSITORY']
+ token = os.environ["GITHUB_TOKEN"]
+ repo_name = os.environ["GITHUB_REPOSITORY"]
g = Github(token)
repo = g.get_repo(repo_name)
global gh
@@ -536,8 +527,7 @@ def process():
# Therefore, we don't support specifying the package until the previously started process has been successfully completed.
if pending_tags and package_name:
pending_packages = [tag.package.name for tag in pending_tags]
- raise Exception(
- f"Cannot release package {package_name}. Pending release for {pending_packages}")
+ raise Exception(f"Cannot release package {package_name}. Pending release for {pending_packages}")
if pending_tags:
print("Found pending tags from previous executions, entering recovery mode.")
@@ -550,8 +540,7 @@ def process():
if package_name:
packages = [package for package in packages if package.name == package_name]
- pending_tags = retry_function(
- func=lambda: update_changelogs(packages), cleanup=reset_repository)
+ pending_tags = retry_function(func=lambda: update_changelogs(packages), cleanup=reset_repository)
push_tags(pending_tags)
@@ -559,8 +548,7 @@ def validate_git_root():
"""
Validate that the script is run from the root of the repository.
"""
- repo_root = subprocess.check_output(["git", "rev-parse",
- "--show-toplevel"]).strip().decode("utf-8")
+ repo_root = subprocess.check_output(["git", "rev-parse", "--show-toplevel"]).strip().decode("utf-8")
current_dir = subprocess.check_output(["pwd"]).strip().decode("utf-8")
if repo_root != current_dir:
raise Exception("Please run this script from the root of the repository.")