Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Restored test checking quota exhaustion #15968

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 0 additions & 1 deletion .github/config/muted_ya.txt
Original file line number Diff line number Diff line change
Expand Up @@ -237,7 +237,6 @@ ydb/tests/functional/tpc/large test_tpcds.py.TestTpcdsS1.test_tpcds[86]
ydb/tests/functional/tpc/large test_tpcds.py.TestTpcdsS1.test_tpcds[9]
ydb/tests/functional/tpc/large test_tpch_spilling.py.TestTpchSpillingS10.test_tpch[7]
ydb/tests/olap sole chunk chunk
ydb/tests/olap test_quota_exhaustion.py.TestYdbWorkload.test_delete
ydb/tests/olap/column_family/compression alter_compression.py.TestAlterCompression.test_all_supported_compression
ydb/tests/olap/column_family/compression alter_compression.py.TestAlterCompression.test_availability_data
ydb/tests/olap/column_family/compression sole chunk chunk
Expand Down
53 changes: 37 additions & 16 deletions ydb/tests/olap/test_quota_exhaustion.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,8 @@
from ydb.tests.library.harness.kikimr_runner import KiKiMR
from ydb.tests.library.test_meta import link_test_case

# One upsert batch inserts ROWS_CHUNK_SIZE rows; the stress loops issue at
# most ROWS_CHUNKS_COUNT batches.  Sized so that the small (40 MB) tenant
# quota configured in test_delete overflows quickly and the tests stay fast.
ROWS_CHUNK_SIZE = 1000000
ROWS_CHUNKS_COUNT = 50


class TestYdbWorkload(object):
Expand All @@ -26,14 +26,14 @@ def teardown_class(cls):
cls.cluster.stop()

def make_session(self):
driver = ydb.Driver(endpoint=f'grpc://localhost:{self.cluster.nodes[1].grpc_port}', database='/Root')
driver = ydb.Driver(endpoint=f'grpc://localhost:{self.cluster.nodes[1].grpc_port}', database=self.database_name)
session = ydb.QuerySessionPool(driver)
driver.wait(5, fail_fast=True)
return session

def create_test_table(self, session, table):
return session.execute_with_retries(f"""
CREATE TABLE {table} (
CREATE TABLE `{table}` (
k Int32 NOT NULL,
v Uint64,
PRIMARY KEY (k)
Expand All @@ -46,7 +46,7 @@ def upsert_test_chunk(self, session, table, chunk_id, retries=10):
$values_list = ListReplicate(42ul, $n);
$rows_list = ListFoldMap($values_list, {chunk_id * ROWS_CHUNK_SIZE}, ($val, $i) -> ((<|k:$i, v:$val|>, $i + 1)));

UPSERT INTO {table}
UPSERT INTO `{table}`
SELECT * FROM AS_TABLE($rows_list);
""", None, ydb.retries.RetrySettings(max_retries=retries))

Expand All @@ -55,12 +55,17 @@ def upsert_until_overload(self, session, table):
for i in range(ROWS_CHUNKS_COUNT):
res = self.upsert_test_chunk(session, table, i, retries=0)
print(f"upsert #{i} ok, result:", res, file=sys.stderr)
described = self.cluster.client.describe('/Root', '')
print('Quota exceeded {}'.format(described.PathDescription.DomainDescription.DomainState.DiskQuotaExceeded), file=sys.stderr)
except ydb.issues.Overloaded:
print('upsert: got overload issue', file=sys.stderr)
except ydb.issues.Unavailable:
print('upsert: got overload issue', file=sys.stderr)

@link_test_case("#13529")
def test(self):
"""As per https://github.com/ydb-platform/ydb/issues/13529"""
self.database_name = '/Root'
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Вынеси self.database_name на уровень класса по аналогии с этим примером

session = self.make_session()

# Overflow the database
Expand All @@ -76,7 +81,7 @@ def test(self):

def delete_test_chunk(self, session, table, chunk_id, retries=10):
session.execute_with_retries(f"""
DELETE FROM {table}
DELETE FROM `{table}`
WHERE {chunk_id * ROWS_CHUNK_SIZE} <= k AND k <= {chunk_id * ROWS_CHUNK_SIZE + ROWS_CHUNK_SIZE}
""", None, ydb.retries.RetrySettings(max_retries=retries))

Expand All @@ -85,9 +90,13 @@ def delete_until_overload(self, session, table):
try:
self.delete_test_chunk(session, table, i, retries=0)
print(f"delete #{i} ok", file=sys.stderr)
except ydb.issues.Unavailable:
print('delete: got unavailable issue', file=sys.stderr)
return i
except ydb.issues.Overloaded:
print('delete: got overload issue', file=sys.stderr)
return i
return ROWS_CHUNKS_COUNT

def ydbcli_db_schema_exec(self, node, operation_proto):
endpoint = f"{node.host}:{node.port}"
Expand Down Expand Up @@ -122,24 +131,36 @@ def alter_database_quotas(self, node, database_path, database_quotas):

def test_delete(self):
"""As per https://github.com/ydb-platform/ydb/issues/13653"""
session = self.make_session()
self.database_name = os.path.join('/Root', 'test')
print('Database name {}'.format(self.database_name), file=sys.stderr)
self.cluster.create_database(
self.database_name,
storage_pool_units_count={
'hdd': 1
},
)
self.cluster.register_and_start_slots(self.database_name, count=1)
self.cluster.wait_tenant_up(self.database_name)

# Set soft and hard quotas to 6GB
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Тут комменатрий стал не актуален про 6 GB, а уменьшил это значение чтобы тест быстрее проходил?

self.alter_database_quotas(self.cluster.nodes[1], '/Root', """
data_size_hard_quota: 6000000000
data_size_soft_quota: 6000000000
self.alter_database_quotas(self.cluster.nodes[1], self.database_name, """
data_size_hard_quota: 40000000
data_size_soft_quota: 40000000
""")

session = self.make_session()

# Overflow the database
self.create_test_table(session, 'huge')
self.upsert_until_overload(session, 'huge')
table_path = os.path.join(self.database_name, 'huge')
self.create_test_table(session, table_path)
self.upsert_until_overload(session, table_path)

# Check that deletion works at least first time
# self.delete_test_chunk(session, 'huge', 0)
self.delete_test_chunk(session, table_path, 0)
# ^ uncomment after fixing https://github.com/ydb-platform/ydb/issues/13808
Copy link
Collaborator

@dorooleg dorooleg Mar 20, 2025

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Комментарий можно убирать?


# Check that deletions will lead to overflow at some moment
i = self.delete_until_overload(session, 'huge')
i = self.delete_until_overload(session, table_path)

# Try to wait until deletion works again (after compaction)
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Напиши комментарий что это в отдельном ревью будет проверяться

self.delete_test_chunk(session, 'huge', i)
# Check that all DELETE statements are completed
assert i == ROWS_CHUNKS_COUNT
Loading