Skip to content

Commit ef1c6dd

Browse files
authored
Adding flag to turn off iceberg (Netflix#303)
1 parent aede8de commit ef1c6dd

File tree

10 files changed

+31
-8
lines changed

10 files changed

+31
-8
lines changed

metacat-common-server/src/main/java/com/netflix/metacat/common/server/properties/Config.java

+7
Original file line numberDiff line numberDiff line change
@@ -381,5 +381,12 @@ public interface Config {
381381
* @return Iceberg Table Summary Fetch Size
382382
*/
383383
int getIcebergTableSummaryFetchSize();
384+
/**
385+
* Enable iceberg table processing.
386+
*
387+
* @return true if iceberg table processing is enabled
388+
*/
389+
boolean isIcebergEnabled();
390+
384391
}
385392

metacat-common-server/src/main/java/com/netflix/metacat/common/server/properties/DefaultConfigImpl.java

+7
Original file line numberDiff line numberDiff line change
@@ -446,4 +446,11 @@ public int getIcebergTableSummaryFetchSize() {
446446
return this.metacatProperties.getHive().getIceberg().getFetchSizeInTableSummary();
447447
}
448448

449+
/**
450+
* {@inheritDoc}
451+
*/
452+
@Override
453+
public boolean isIcebergEnabled() {
454+
return this.metacatProperties.getHive().getIceberg().isEnabled();
455+
}
449456
}

metacat-common-server/src/main/java/com/netflix/metacat/common/server/properties/HiveProperties.java

+1
Original file line numberDiff line numberDiff line change
@@ -96,6 +96,7 @@ public static class Whitelist {
9696
*/
9797
@Data
9898
public static class Iceberg {
99+
private boolean enabled;
99100
private int fetchSizeInTableSummary = 100;
100101
}
101102
}

metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/HiveConnectorPartitionService.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -74,8 +74,8 @@
7474
*/
7575
@Getter
7676
public class HiveConnectorPartitionService implements ConnectorPartitionService {
77+
protected final ConnectorContext context;
7778
private final String catalogName;
78-
private final ConnectorContext context;
7979
private final HiveConnectorInfoConverter hiveMetacatConverters;
8080
private final IMetacatHiveClient metacatHiveClient;
8181

metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/HiveConnectorTableService.java

+3-1
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,7 @@
7171
public class HiveConnectorTableService implements ConnectorTableService {
7272
private static final String PARAMETER_EXTERNAL = "EXTERNAL";
7373
protected final HiveConnectorInfoConverter hiveMetacatConverters;
74+
protected final ConnectorContext connectorContext;
7475
private final String catalogName;
7576
private final IMetacatHiveClient metacatHiveClient;
7677
private final HiveConnectorDatabaseService hiveConnectorDatabaseService;
@@ -104,6 +105,7 @@ public HiveConnectorTableService(
104105
connectorContext.getConfiguration().getOrDefault(HiveConfigConstants.ON_RENAME_CONVERT_TO_EXTERNAL,
105106
"true")
106107
);
108+
this.connectorContext = connectorContext;
107109
}
108110

109111
/**
@@ -177,7 +179,7 @@ void updateTable(
177179
table.getParameters().putAll(tableInfo.getMetadata());
178180
}
179181
//no other information is needed for iceberg table
180-
if (HiveTableUtil.isIcebergTable(tableInfo)) {
182+
if (connectorContext.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)) {
181183
table.setPartitionKeys(Collections.emptyList());
182184
log.debug("Skipping seder and set partition key to empty when updating iceberg table in hive");
183185
return;

metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/HiveConnectorFastPartitionService.java

+6-5
Original file line numberDiff line numberDiff line change
@@ -107,7 +107,7 @@ public int getPartitionCount(
107107
final QualifiedName tableName,
108108
final TableInfo tableInfo
109109
) {
110-
if (HiveTableUtil.isIcebergTable(tableInfo)) {
110+
if (context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)) {
111111
throw new MetacatNotSupportedException("IcebergTable Unsupported Operation!");
112112
}
113113
return directSqlGetPartition.getPartitionCount(requestContext, tableName);
@@ -122,7 +122,7 @@ public List<PartitionInfo> getPartitions(
122122
final QualifiedName tableName,
123123
final PartitionListRequest partitionsRequest,
124124
final TableInfo tableInfo) {
125-
return (HiveTableUtil.isIcebergTable(tableInfo))
125+
return context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)
126126
? getIcebergPartitionInfos(tableInfo, partitionsRequest)
127127
: directSqlGetPartition.getPartitions(requestContext, tableName, partitionsRequest);
128128
}
@@ -136,14 +136,15 @@ public List<String> getPartitionKeys(final ConnectorRequestContext requestContex
136136
final PartitionListRequest partitionsRequest,
137137
final TableInfo tableInfo) {
138138

139-
return (HiveTableUtil.isIcebergTable(tableInfo))
139+
return context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)
140140
? getIcebergPartitionInfos(tableInfo, partitionsRequest)
141141
.stream().map(info -> info.getName().getPartitionName()).collect(Collectors.toList())
142142
:
143143
directSqlGetPartition.getPartitionKeys(requestContext, tableName, partitionsRequest);
144144

145145
}
146146

147+
147148
/**
148149
* {@inheritDoc}.
149150
*/
@@ -154,7 +155,7 @@ public List<String> getPartitionUris(
154155
final PartitionListRequest partitionsRequest,
155156
final TableInfo tableInfo
156157
) {
157-
if (HiveTableUtil.isIcebergTable(tableInfo)) {
158+
if (context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)) {
158159
throw new MetacatNotSupportedException("IcebergTable Unsupported Operation!");
159160
}
160161
return directSqlGetPartition.getPartitionUris(requestContext, tableName, partitionsRequest);
@@ -327,7 +328,7 @@ public void deletePartitions(
327328
final TableInfo tableInfo
328329
) {
329330
//TODO: implemented as next step
330-
if (HiveTableUtil.isIcebergTable(tableInfo)) {
331+
if (context.getConfig().isIcebergEnabled() && HiveTableUtil.isIcebergTable(tableInfo)) {
331332
throw new MetacatNotSupportedException("IcebergTable Unsupported Operation!");
332333
}
333334
//The direct sql based deletion doesn't check if the partition is valid

metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/sql/HiveConnectorFastTableService.java

+1-1
Original file line numberDiff line numberDiff line change
@@ -93,7 +93,7 @@ public boolean exists(final ConnectorRequestContext requestContext, final Qualif
9393
@Override
9494
public TableInfo get(final ConnectorRequestContext requestContext, final QualifiedName name) {
9595
final TableInfo info = super.get(requestContext, name);
96-
if (!HiveTableUtil.isIcebergTable(info)) {
96+
if (!connectorContext.getConfig().isIcebergEnabled() || !HiveTableUtil.isIcebergTable(info)) {
9797
return info;
9898
}
9999
final String tableLoc = HiveTableUtil.getIcebergTableMetadataLocation(info);

metacat-connector-hive/src/main/java/com/netflix/metacat/connector/hive/util/HiveTableUtil.java

+1
Original file line numberDiff line numberDiff line change
@@ -45,6 +45,7 @@
4545
public final class HiveTableUtil {
4646
private static final String PARQUET_HIVE_SERDE = "parquet.hive.serde.ParquetHiveSerDe";
4747
private static final String DUMMY_LCATION = "ICEBERG_DUMMY_LOCATION";
48+
4849
private HiveTableUtil() {
4950
}
5051

metacat-connector-hive/src/test/groovy/com/netflix/metacat/connector/hive/HiveConnectorFastPartitionSpec.groovy

+3
Original file line numberDiff line numberDiff line change
@@ -35,9 +35,12 @@ import com.netflix.metacat.connector.hive.sql.HiveConnectorFastPartitionService
3535
import com.netflix.metacat.testdata.provider.MetacatDataInfoProvider
3636
import com.netflix.spectator.api.NoopRegistry
3737
import org.apache.hadoop.hive.metastore.Warehouse
38+
import org.junit.Ignore
3839
import spock.lang.Shared
3940
import spock.lang.Specification
4041

42+
//TODO: enable this test once enable iceberg table processing
43+
@Ignore
4144
class HiveConnectorFastPartitionSpec extends Specification {
4245
@Shared
4346
MetacatHiveClient metacatHiveClient = Mock(MetacatHiveClient);

metacat-functional-tests/metacat-test-cluster/docker-compose.yml

+1
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,7 @@ services:
5555
-Dmetacat.definition.metadata.delete.enableForTable=false
5656
-Dmetacat.definition.metadata.delete.enableDeleteForQualifiedNames=hive-metastore/hsmoke_ddb,hive-metastore/hsmoke_ddb1/test_create_table1,embedded-hive-metastore,embedded-fast-hive-metastore/fsmoke_db1,embedded-fast-hive-metastore/fsmoke_ddb1,embedded-fast-hive-metastore/shard,embedded-fast-hive-metastore/fsmoke_db4,s3-mysql-db,mysql-56-db
5757
-Dmetacat.hive.metastore.batchSize=10
58+
-Dmetacat.hive.iceberg.enabled=true
5859
-Dmetacat.usermetadata.config.location=/etc/metacat/usermetadata.properties
5960
-Dmetacat.cache.enabled=true
6061
-Dmetacat.authorization.enabled=true

0 commit comments

Comments
 (0)