
Commit afcf0b0

Restored test checking quota exhaustion (#15968)
1 parent ab43a23 · commit afcf0b0

2 files changed: +40 -19 lines


.github/config/muted_ya.txt (-1 line)

@@ -227,7 +227,6 @@ ydb/tests/functional/tpc/large test_tpcds.py.TestTpcdsS1.test_tpcds[86]
 ydb/tests/functional/tpc/large test_tpcds.py.TestTpcdsS1.test_tpcds[9]
 ydb/tests/functional/tpc/large test_tpch_spilling.py.TestTpchSpillingS10.test_tpch[7]
 ydb/tests/olap sole chunk chunk
-ydb/tests/olap test_quota_exhaustion.py.TestYdbWorkload.test_delete
 ydb/tests/olap/column_family/compression alter_compression.py.TestAlterCompression.test_all_supported_compression
 ydb/tests/olap/column_family/compression sole chunk chunk
 ydb/tests/olap/scenario sole chunk chunk
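Dropping this line from the mute list means test_delete is no longer skipped in CI, so the test restored below actually runs again.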

ydb/tests/olap/test_quota_exhaustion.py (+40 -18)
@@ -7,8 +7,8 @@
 from ydb.tests.library.harness.kikimr_runner import KiKiMR
 from ydb.tests.library.test_meta import link_test_case
 
-ROWS_CHUNK_SIZE = 3000000
-ROWS_CHUNKS_COUNT = 100000
+ROWS_CHUNK_SIZE = 1000000
+ROWS_CHUNKS_COUNT = 50
 
 
 class TestYdbWorkload(object):
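A quick sanity check on the new constants (a sketch, assuming 4 bytes per Int32 key and 8 per Uint64 value, and ignoring per-row overhead and column-store compression): each chunk carries roughly 12 MB of raw payload, so the 40 MB quota set in test_delete below is reached within a few chunks, while 50 chunks bound the total work.

```python
# Rough payload math for the new constants (assumptions as stated above).
ROWS_CHUNK_SIZE = 1000000
ROWS_CHUNKS_COUNT = 50

bytes_per_row = 4 + 8                            # Int32 key + Uint64 value, raw
mb_per_chunk = ROWS_CHUNK_SIZE * bytes_per_row / 1e6
print(f"~{mb_per_chunk:.0f} MB per chunk")                      # ~12 MB
print(f"~{40 / mb_per_chunk:.1f} chunks to hit a 40 MB quota")  # ~3.3
```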
@@ -26,14 +26,14 @@ def teardown_class(cls):
         cls.cluster.stop()
 
     def make_session(self):
-        driver = ydb.Driver(endpoint=f'grpc://localhost:{self.cluster.nodes[1].grpc_port}', database='/Root')
+        driver = ydb.Driver(endpoint=f'grpc://localhost:{self.cluster.nodes[1].grpc_port}', database=self.database_name)
         session = ydb.QuerySessionPool(driver)
         driver.wait(5, fail_fast=True)
         return session
 
     def create_test_table(self, session, table):
         return session.execute_with_retries(f"""
-            CREATE TABLE {table} (
+            CREATE TABLE `{table}` (
                 k Int32 NOT NULL,
                 v Uint64,
                 PRIMARY KEY (k)
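The added backticks are not cosmetic: test_delete below passes a full scheme path rather than a bare table name, and YQL only accepts path-like identifiers when they are backtick-quoted. A minimal illustration (hypothetical path):

```python
import os

table_path = os.path.join('/Root/test', 'huge')   # -> '/Root/test/huge'
# The '/' characters would break parsing if the identifier were unquoted:
query = f"CREATE TABLE `{table_path}` (k Int32 NOT NULL, v Uint64, PRIMARY KEY (k))"
```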
@@ -46,7 +46,7 @@ def upsert_test_chunk(self, session, table, chunk_id, retries=10):
             $values_list = ListReplicate(42ul, $n);
             $rows_list = ListFoldMap($values_list, {chunk_id * ROWS_CHUNK_SIZE}, ($val, $i) -> ((<|k:$i, v:$val|>, $i + 1)));
 
-            UPSERT INTO {table}
+            UPSERT INTO `{table}`
             SELECT * FROM AS_TABLE($rows_list);
         """, None, ydb.retries.RetrySettings(max_retries=retries))

@@ -55,12 +55,17 @@ def upsert_until_overload(self, session, table):
             for i in range(ROWS_CHUNKS_COUNT):
                 res = self.upsert_test_chunk(session, table, i, retries=0)
                 print(f"upsert #{i} ok, result:", res, file=sys.stderr)
+                described = self.cluster.client.describe('/Root', '')
+                print('Quota exceeded {}'.format(described.PathDescription.DomainDescription.DomainState.DiskQuotaExceeded), file=sys.stderr)
         except ydb.issues.Overloaded:
             print('upsert: got overload issue', file=sys.stderr)
+        except ydb.issues.Unavailable:
+            print('upsert: got unavailable issue', file=sys.stderr)
 
     @link_test_case("#13529")
     def test(self):
         """As per https://github.com/ydb-platform/ydb/issues/13529"""
+        self.database_name = '/Root'
         session = self.make_session()
 
         # Overflow the database
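For readers outside the YDB tree, here is a compact sketch of the write-until-overload pattern above (assumptions: a locally reachable YDB endpoint, and a simplified single-row UPSERT standing in for the chunked one; the pool, retry, and error types mirror those used in the diff):

```python
import sys
import ydb

def upsert_until_quota_exhausted(pool, table, chunks):
    """Write chunks until YDB rejects writes because the disk quota is hit."""
    for i in range(chunks):
        try:
            pool.execute_with_retries(
                f"UPSERT INTO `{table}` (k, v) VALUES ({i}, 42ul)",  # simplified stand-in
                None,
                ydb.retries.RetrySettings(max_retries=0),  # fail fast, no retries
            )
        except (ydb.issues.Overloaded, ydb.issues.Unavailable):
            print(f'upsert #{i}: quota exhausted', file=sys.stderr)
            return i  # stopped early: quota reached
    return chunks

driver = ydb.Driver(endpoint='grpc://localhost:2136', database='/Root')  # hypothetical endpoint
driver.wait(5, fail_fast=True)
pool = ydb.QuerySessionPool(driver)
upsert_until_quota_exhausted(pool, '/Root/huge', 50)
```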
@@ -76,7 +81,7 @@ def test(self):
 
     def delete_test_chunk(self, session, table, chunk_id, retries=10):
         session.execute_with_retries(f"""
-            DELETE FROM {table}
+            DELETE FROM `{table}`
             WHERE {chunk_id * ROWS_CHUNK_SIZE} <= k AND k <= {chunk_id * ROWS_CHUNK_SIZE + ROWS_CHUNK_SIZE}
         """, None, ydb.retries.RetrySettings(max_retries=retries))

@@ -85,9 +90,13 @@ def delete_until_overload(self, session, table):
             try:
                 self.delete_test_chunk(session, table, i, retries=0)
                 print(f"delete #{i} ok", file=sys.stderr)
+            except ydb.issues.Unavailable:
+                print('delete: got unavailable issue', file=sys.stderr)
+                return i
             except ydb.issues.Overloaded:
                 print('delete: got overload issue', file=sys.stderr)
                 return i
+        return ROWS_CHUNKS_COUNT
 
     def ydbcli_db_schema_exec(self, node, operation_proto):
         endpoint = f"{node.host}:{node.port}"
@@ -122,24 +131,37 @@ def alter_database_quotas(self, node, database_path, database_quotas):
 
     def test_delete(self):
         """As per https://github.com/ydb-platform/ydb/issues/13653"""
-        session = self.make_session()
+        self.database_name = os.path.join('/Root', 'test')
+        print('Database name {}'.format(self.database_name), file=sys.stderr)
+        self.cluster.create_database(
+            self.database_name,
+            storage_pool_units_count={
+                'hdd': 1
+            },
+        )
+        self.cluster.register_and_start_slots(self.database_name, count=1)
+        self.cluster.wait_tenant_up(self.database_name)
 
-        # Set soft and hard quotas to 6GB
-        self.alter_database_quotas(self.cluster.nodes[1], '/Root', """
-            data_size_hard_quota: 6000000000
-            data_size_soft_quota: 6000000000
+        # Set soft and hard quotas to 40 MB
+        self.alter_database_quotas(self.cluster.nodes[1], self.database_name, """
+            data_size_hard_quota: 40000000
+            data_size_soft_quota: 40000000
         """)
 
+        session = self.make_session()
+
         # Overflow the database
-        self.create_test_table(session, 'huge')
-        self.upsert_until_overload(session, 'huge')
+        table_path = os.path.join(self.database_name, 'huge')
+        self.create_test_table(session, table_path)
+        self.upsert_until_overload(session, table_path)
 
         # Check that deletion works at least the first time
-        # self.delete_test_chunk(session, 'huge', 0)
-        # ^ uncomment after fixing https://github.com/ydb-platform/ydb/issues/13808
+        self.delete_test_chunk(session, table_path, 0)
 
         # Check that deletions will lead to overflow at some point
-        i = self.delete_until_overload(session, 'huge')
+        i = self.delete_until_overload(session, table_path)
+
+        # Check that all DELETE statements are completed
+        assert i == ROWS_CHUNKS_COUNT
 
-        # Try to wait until deletion works again (after compaction)
-        self.delete_test_chunk(session, 'huge', i)
+        # Re-enabling writes after data deletion will be checked in a separate PR
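And the matching sketch for the deletion phase, mirroring delete_test_chunk and delete_until_overload (same assumptions as the upsert sketch above; returning the full chunk count on success is exactly what the new assert checks):

```python
import sys
import ydb

ROWS_CHUNK_SIZE = 1000000

def delete_until_quota_error(pool, table, chunks):
    """Delete key ranges chunk by chunk, stopping early on quota pressure."""
    for i in range(chunks):
        lo = i * ROWS_CHUNK_SIZE
        hi = lo + ROWS_CHUNK_SIZE
        try:
            pool.execute_with_retries(
                f"DELETE FROM `{table}` WHERE {lo} <= k AND k <= {hi}",
                None,
                ydb.retries.RetrySettings(max_retries=0),
            )
        except (ydb.issues.Overloaded, ydb.issues.Unavailable):
            print(f'delete #{i}: stopped by quota pressure', file=sys.stderr)
            return i
    return chunks  # every DELETE completed; matches the test's assert
```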
