Skip to content

Commit

Permalink
Revert "tests: Adapt to new cluster size reality"
Browse files Browse the repository at this point in the history
This reverts commit 636264d.
  • Loading branch information
def- committed Nov 12, 2024
1 parent 6941d74 commit 66cf848
Show file tree
Hide file tree
Showing 12 changed files with 177 additions and 81 deletions.
7 changes: 4 additions & 3 deletions misc/python/materialize/cli/run.py
Original file line number Diff line number Diff line change
Expand Up @@ -275,9 +275,10 @@ def main() -> int:
if args.monitoring:
command += ["--opentelemetry-endpoint=http://localhost:4317"]
elif args.program == "sqllogictest":
params = get_default_system_parameters()
params["enable_columnation_lgalloc"] = "false"
formatted_params = [f"{key}={value}" for key, value in params.items()]
formatted_params = [
f"{key}={value}"
for key, value in get_default_system_parameters().items()
]
system_parameter_default = ";".join(formatted_params)
# Connect to the database to ensure it exists.
_connect_sql(args.postgres)
Expand Down
8 changes: 4 additions & 4 deletions misc/python/materialize/mzcompose/services/sql_logic_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -28,12 +28,12 @@ def __init__(
volumes: list[str] = ["../..:/workdir"],
depends_on: list[str] = [METADATA_STORE],
) -> None:
params = get_default_system_parameters()
# Otherwise very noisy in SLT: lgalloc error: I/O error, falling back to heap
params["enable_columnation_lgalloc"] = "false"
environment += [
"MZ_SYSTEM_PARAMETER_DEFAULT="
+ ";".join(f"{key}={value}" for key, value in params.items())
+ ";".join(
f"{key}={value}"
for key, value in get_default_system_parameters().items()
)
]

super().__init__(
Expand Down
1 change: 0 additions & 1 deletion misc/python/materialize/parallel_benchmark/scenarios.py
Original file line number Diff line number Diff line change
Expand Up @@ -746,7 +746,6 @@ def __init__(self, c: Composition, conn_infos: dict[str, PgConnInfo]):
c,
),
dist=Periodic(per_second=1),
report_regressions=False, # Don't care about this
),
ClosedLoop(
action=StandaloneQuery(
Expand Down
6 changes: 3 additions & 3 deletions src/catalog/tests/snapshots/debug__opened_trace.snap
Original file line number Diff line number Diff line change
Expand Up @@ -374,7 +374,7 @@ Trace {
),
replica_name: "r1",
logical_size: "1",
disk: true,
disk: false,
billed_as: None,
internal: false,
reason: Some(
Expand Down Expand Up @@ -558,7 +558,7 @@ Trace {
),
},
),
disk: true,
disk: false,
optimizer_feature_overrides: [],
schedule: Some(
ClusterSchedule {
Expand Down Expand Up @@ -628,7 +628,7 @@ Trace {
ManagedLocation {
size: "1",
availability_zone: None,
disk: true,
disk: false,
internal: false,
billed_as: None,
pending: false,
Expand Down
3 changes: 1 addition & 2 deletions src/catalog/tests/snapshots/open__initial_audit_log.snap
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
---
source: src/catalog/tests/open.rs
expression: audit_log
snapshot_kind: text
---
[
V1(
Expand Down Expand Up @@ -187,7 +186,7 @@ snapshot_kind: text
),
replica_name: "r1",
logical_size: "1",
disk: true,
disk: false,
billed_as: None,
internal: false,
reason: System,
Expand Down
5 changes: 2 additions & 3 deletions src/catalog/tests/snapshots/open__initial_snapshot.snap
Original file line number Diff line number Diff line change
@@ -1,7 +1,6 @@
---
source: src/catalog/tests/open.rs
expression: test_snapshot
snapshot_kind: text
---
Snapshot {
databases: {
Expand Down Expand Up @@ -1221,7 +1220,7 @@ Snapshot {
),
},
),
disk: true,
disk: false,
optimizer_feature_overrides: [],
schedule: Some(
ClusterSchedule {
Expand Down Expand Up @@ -1347,7 +1346,7 @@ Snapshot {
ManagedLocation {
size: "1",
availability_zone: None,
disk: true,
disk: false,
internal: false,
billed_as: None,
pending: false,
Expand Down
30 changes: 18 additions & 12 deletions test/cloudtest/test_compute.py
Original file line number Diff line number Diff line change
Expand Up @@ -133,21 +133,27 @@ def test_disk_label(mz: MaterializeApplication) -> None:
user="mz_system",
)

mz.environmentd.sql("CREATE CLUSTER disk MANAGED, SIZE = '2-1', DISK = true")
for value in ("true", "false"):
mz.environmentd.sql(
f"CREATE CLUSTER disk_{value} MANAGED, SIZE = '2-1', DISK = {value}"
)

(cluster_id, replica_id) = mz.environmentd.sql_query(
"SELECT mz_clusters.id, mz_cluster_replicas.id FROM mz_cluster_replicas JOIN mz_clusters ON mz_cluster_replicas.cluster_id = mz_clusters.id WHERE mz_clusters.name = 'disk'"
)[0]
assert cluster_id is not None
assert replica_id is not None
(cluster_id, replica_id) = mz.environmentd.sql_query(
f"SELECT mz_clusters.id, mz_cluster_replicas.id FROM mz_cluster_replicas JOIN mz_clusters ON mz_cluster_replicas.cluster_id = mz_clusters.id WHERE mz_clusters.name = 'disk_{value}'"
)[0]
assert cluster_id is not None
assert replica_id is not None

node_selectors = get_node_selector(mz, cluster_id, replica_id)
assert (
node_selectors
== '\'{"materialize.cloud/disk":"true"} {"materialize.cloud/disk":"true"}\''
), node_selectors
node_selectors = get_node_selector(mz, cluster_id, replica_id)
if value == "true":
assert (
node_selectors
== '\'{"materialize.cloud/disk":"true"} {"materialize.cloud/disk":"true"}\''
), node_selectors
else:
assert node_selectors == "''"

mz.environmentd.sql("DROP CLUSTER disk CASCADE")
mz.environmentd.sql(f"DROP CLUSTER disk_{value} CASCADE")

# Reset
mz.environmentd.sql(
Expand Down
111 changes: 81 additions & 30 deletions test/cloudtest/test_disk.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ def test_disk_replica(mz: MaterializeApplication) -> None:
> CREATE CLUSTER testdrive_no_reset_disk_cluster1
REPLICAS (r1 (
SIZE '1'
SIZE '1', DISK = true
))
> CREATE CONNECTION IF NOT EXISTS kafka TO KAFKA (BROKER '${testdrive.kafka-addr}', SECURITY PROTOCOL PLAINTEXT)
Expand Down Expand Up @@ -83,7 +83,7 @@ def test_disk_replica(mz: MaterializeApplication) -> None:


def test_always_use_disk_replica(mz: MaterializeApplication) -> None:
"""Testing `cluster_always_use_disk = true` cluster replicas"""
"""Testing `DISK = false, cluster_always_use_disk = true` cluster replicas"""
mz.environmentd.sql(
"ALTER SYSTEM SET cluster_always_use_disk = true",
port="internal",
Expand All @@ -93,43 +93,43 @@ def test_always_use_disk_replica(mz: MaterializeApplication) -> None:
mz.testdrive.run(
input=dedent(
"""
$ kafka-create-topic topic=test
$ kafka-create-topic topic=test
$ kafka-ingest key-format=bytes format=bytes topic=test
key1:val1
key2:val2
$ kafka-ingest key-format=bytes format=bytes topic=test
key1:val1
key2:val2
> CREATE CLUSTER disk_cluster2
REPLICAS (r1 (SIZE '1'))
> CREATE CLUSTER disk_cluster2
REPLICAS (r1 (SIZE '1'))
> CREATE CONNECTION IF NOT EXISTS kafka TO KAFKA (BROKER '${testdrive.kafka-addr}', SECURITY PROTOCOL PLAINTEXT)
> CREATE CONNECTION IF NOT EXISTS kafka TO KAFKA (BROKER '${testdrive.kafka-addr}', SECURITY PROTOCOL PLAINTEXT)
> CREATE SOURCE source1
IN CLUSTER disk_cluster2
FROM KAFKA CONNECTION kafka
(TOPIC 'testdrive-test-${testdrive.seed}');
> CREATE SOURCE source1
IN CLUSTER disk_cluster2
FROM KAFKA CONNECTION kafka
(TOPIC 'testdrive-test-${testdrive.seed}');
> CREATE TABLE source1_tbl FROM SOURCE source1 (REFERENCE "testdrive-test-${testdrive.seed}")
KEY FORMAT TEXT
VALUE FORMAT TEXT
ENVELOPE UPSERT;
> CREATE TABLE source1_tbl FROM SOURCE source1 (REFERENCE "testdrive-test-${testdrive.seed}")
KEY FORMAT TEXT
VALUE FORMAT TEXT
ENVELOPE UPSERT;
> SELECT * FROM source1_tbl;
key text
------------------
key1 val1
key2 val2
> SELECT * FROM source1_tbl;
key text
------------------
key1 val1
key2 val2
$ kafka-ingest key-format=bytes format=bytes topic=test
key1:val3
$ kafka-ingest key-format=bytes format=bytes topic=test
key1:val3
> SELECT * FROM source1_tbl;
key text
------------------
key1 val3
key2 val2
"""
> SELECT * FROM source1_tbl;
key text
------------------
key1 val3
key2 val2
"""
)
)

Expand All @@ -153,3 +153,54 @@ def test_always_use_disk_replica(mz: MaterializeApplication) -> None:
"ls /scratch/storage/upsert",
)
assert source_global_id in on_disk_sources


def test_no_disk_replica(mz: MaterializeApplication) -> None:
    """Testing `DISK = false` cluster replicas"""
    # Testdrive scenario: ingest two keys into a fresh Kafka topic, consume
    # them through an UPSERT source hosted on a replica created with
    # DISK = false, verify the initial snapshot, then check that a later
    # update for key1 supersedes the old value before dropping the cluster.
    script = dedent(
        """
        $ kafka-create-topic topic=test-no-disk
        $ kafka-ingest key-format=bytes format=bytes topic=test-no-disk
        key1:val1
        key2:val2
        > CREATE CLUSTER no_disk_cluster1
        REPLICAS (r1 (
        SIZE '1', DISK = false
        ))
        > CREATE CONNECTION IF NOT EXISTS kafka
        TO KAFKA (BROKER '${testdrive.kafka-addr}', SECURITY PROTOCOL PLAINTEXT)
        > CREATE SOURCE no_disk_source1
        IN CLUSTER no_disk_cluster1
        FROM KAFKA CONNECTION kafka
        (TOPIC 'testdrive-test-no-disk-${testdrive.seed}');
        > CREATE TABLE no_disk_source1_tbl FROM SOURCE no_disk_source1 (REFERENCE "testdrive-test-no-disk-${testdrive.seed}")
        KEY FORMAT TEXT
        VALUE FORMAT TEXT
        ENVELOPE UPSERT;
        > SELECT * FROM no_disk_source1_tbl;
        key text
        ------------------
        key1 val1
        key2 val2
        $ kafka-ingest key-format=bytes format=bytes topic=test-no-disk
        key1:val3
        > SELECT * FROM no_disk_source1_tbl;
        key text
        ------------------
        key1 val3
        key2 val2
        > DROP CLUSTER no_disk_cluster1 CASCADE;
        """
    )
    mz.testdrive.run(input=script)
2 changes: 1 addition & 1 deletion test/sqllogictest/audit_log.slt
Original file line number Diff line number Diff line change
Expand Up @@ -151,7 +151,7 @@ SELECT id, event_type, object_type, details, user FROM mz_audit_events ORDER BY
12 create cluster {"id":"u1","name":"quickstart"} NULL
13 grant cluster {"grantee_id":"p","grantor_id":"s1","object_id":"Cu1","privileges":"U"} NULL
14 grant cluster {"grantee_id":"u1","grantor_id":"s1","object_id":"Cu1","privileges":"UC"} NULL
15 create cluster-replica {"billed_as":null,"cluster_id":"u1","cluster_name":"quickstart","disk":true,"internal":false,"logical_size":"2","reason":"system","replica_id":"u1","replica_name":"r1"} NULL
15 create cluster-replica {"billed_as":null,"cluster_id":"u1","cluster_name":"quickstart","disk":false,"internal":false,"logical_size":"2","reason":"system","replica_id":"u1","replica_name":"r1"} NULL
16 grant system {"grantee_id":"s1","grantor_id":"s1","object_id":"SYSTEM","privileges":"RBNP"} NULL
17 grant system {"grantee_id":"u1","grantor_id":"s1","object_id":"SYSTEM","privileges":"RBNP"} NULL
18 alter system {"name":"enable_reduce_mfp_fusion","value":"on"} mz_system
Expand Down
17 changes: 13 additions & 4 deletions test/sqllogictest/managed_cluster.slt
Original file line number Diff line number Diff line change
Expand Up @@ -420,24 +420,33 @@ ALTER SYSTEM SET enable_disk_cluster_replicas = true;
----
COMPLETE 0

statement error db error: ERROR: DISK option not supported for non-legacy cluster sizes because disk is always enabled
statement ok
CREATE CLUSTER foo REPLICAS (r1 (SIZE '1'), r2 (SIZE '1', DISK))

statement error db error: ERROR: unknown cluster 'foo'
statement error db error: ERROR: Cluster replicas with DISK true do not match expected DISK false
ALTER CLUSTER foo SET (MANAGED, DISK=False, SIZE '1')

statement ok
DROP CLUSTER foo

statement ok
CREATE CLUSTER foo REPLICAS (r1 (SIZE '1'))

statement error db error: ERROR: DISK option not supported for modern cluster sizes because disk is always enabled
statement error db error: ERROR: Cluster replicas with DISK true do not match expected DISK false
ALTER CLUSTER foo SET (MANAGED, SIZE '1', DISK=False)

statement ok
DROP CLUSTER foo

statement error db error: ERROR: DISK option not supported for non-legacy cluster sizes because disk is always enabled
statement ok
CREATE CLUSTER foo REPLICAS (r1 (SIZE '1', DISK), r2 (SIZE '1', DISK))

statement ok
ALTER CLUSTER foo SET (MANAGED, SIZE '1', DISK)

statement ok
DROP CLUSTER foo


simple conn=mz_system,user=mz_system
ALTER SYSTEM SET enable_graceful_cluster_reconfiguration = true;
Expand Down
Loading

0 comments on commit 66cf848

Please sign in to comment.