-
Notifications
You must be signed in to change notification settings - Fork 617
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Restored test checking quota exhaustion #15968
base: main
Are you sure you want to change the base?
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -7,8 +7,8 @@ | |
from ydb.tests.library.harness.kikimr_runner import KiKiMR | ||
from ydb.tests.library.test_meta import link_test_case | ||
|
||
ROWS_CHUNK_SIZE = 3000000 | ||
ROWS_CHUNKS_COUNT = 100000 | ||
ROWS_CHUNK_SIZE = 1000000 | ||
ROWS_CHUNKS_COUNT = 50 | ||
|
||
|
||
class TestYdbWorkload(object): | ||
|
@@ -26,14 +26,14 @@ def teardown_class(cls): | |
cls.cluster.stop() | ||
|
||
def make_session(self): | ||
driver = ydb.Driver(endpoint=f'grpc://localhost:{self.cluster.nodes[1].grpc_port}', database='/Root') | ||
driver = ydb.Driver(endpoint=f'grpc://localhost:{self.cluster.nodes[1].grpc_port}', database=self.database_name) | ||
session = ydb.QuerySessionPool(driver) | ||
driver.wait(5, fail_fast=True) | ||
return session | ||
|
||
def create_test_table(self, session, table): | ||
return session.execute_with_retries(f""" | ||
CREATE TABLE {table} ( | ||
CREATE TABLE `{table}` ( | ||
k Int32 NOT NULL, | ||
v Uint64, | ||
PRIMARY KEY (k) | ||
|
@@ -46,7 +46,7 @@ def upsert_test_chunk(self, session, table, chunk_id, retries=10): | |
$values_list = ListReplicate(42ul, $n); | ||
$rows_list = ListFoldMap($values_list, {chunk_id * ROWS_CHUNK_SIZE}, ($val, $i) -> ((<|k:$i, v:$val|>, $i + 1))); | ||
|
||
UPSERT INTO {table} | ||
UPSERT INTO `{table}` | ||
SELECT * FROM AS_TABLE($rows_list); | ||
""", None, ydb.retries.RetrySettings(max_retries=retries)) | ||
|
||
|
@@ -55,12 +55,17 @@ def upsert_until_overload(self, session, table): | |
for i in range(ROWS_CHUNKS_COUNT): | ||
res = self.upsert_test_chunk(session, table, i, retries=0) | ||
print(f"upsert #{i} ok, result:", res, file=sys.stderr) | ||
described = self.cluster.client.describe('/Root', '') | ||
print('Quota exceeded {}'.format(described.PathDescription.DomainDescription.DomainState.DiskQuotaExceeded), file=sys.stderr) | ||
except ydb.issues.Overloaded: | ||
print('upsert: got overload issue', file=sys.stderr) | ||
except ydb.issues.Unavailable: | ||
print('upsert: got overload issue', file=sys.stderr) | ||
|
||
@link_test_case("#13529") | ||
def test(self): | ||
"""As per https://github.com/ydb-platform/ydb/issues/13529""" | ||
self.database_name = '/Root' | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Вынеси |
||
session = self.make_session() | ||
|
||
# Overflow the database | ||
|
@@ -76,7 +81,7 @@ def test(self): | |
|
||
def delete_test_chunk(self, session, table, chunk_id, retries=10): | ||
session.execute_with_retries(f""" | ||
DELETE FROM {table} | ||
DELETE FROM `{table}` | ||
WHERE {chunk_id * ROWS_CHUNK_SIZE} <= k AND k <= {chunk_id * ROWS_CHUNK_SIZE + ROWS_CHUNK_SIZE} | ||
""", None, ydb.retries.RetrySettings(max_retries=retries)) | ||
|
||
|
@@ -85,9 +90,13 @@ def delete_until_overload(self, session, table): | |
try: | ||
self.delete_test_chunk(session, table, i, retries=0) | ||
print(f"delete #{i} ok", file=sys.stderr) | ||
except ydb.issues.Unavailable: | ||
print('delete: got unavailable issue', file=sys.stderr) | ||
return i | ||
except ydb.issues.Overloaded: | ||
print('delete: got overload issue', file=sys.stderr) | ||
return i | ||
return ROWS_CHUNKS_COUNT | ||
|
||
def ydbcli_db_schema_exec(self, node, operation_proto): | ||
endpoint = f"{node.host}:{node.port}" | ||
|
@@ -122,24 +131,36 @@ def alter_database_quotas(self, node, database_path, database_quotas): | |
|
||
def test_delete(self): | ||
"""As per https://github.com/ydb-platform/ydb/issues/13653""" | ||
session = self.make_session() | ||
self.database_name = os.path.join('/Root', 'test') | ||
print('Database name {}'.format(self.database_name), file=sys.stderr) | ||
self.cluster.create_database( | ||
self.database_name, | ||
storage_pool_units_count={ | ||
'hdd': 1 | ||
}, | ||
) | ||
self.cluster.register_and_start_slots(self.database_name, count=1) | ||
self.cluster.wait_tenant_up(self.database_name) | ||
|
||
# Set soft and hard quotas to 6GB | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Тут комментарий стал не актуален про 6 GB, а уменьшил это значение чтобы тест быстрее проходил? |
||
self.alter_database_quotas(self.cluster.nodes[1], '/Root', """ | ||
data_size_hard_quota: 6000000000 | ||
data_size_soft_quota: 6000000000 | ||
self.alter_database_quotas(self.cluster.nodes[1], self.database_name, """ | ||
data_size_hard_quota: 40000000 | ||
data_size_soft_quota: 40000000 | ||
""") | ||
|
||
session = self.make_session() | ||
|
||
# Overflow the database | ||
self.create_test_table(session, 'huge') | ||
self.upsert_until_overload(session, 'huge') | ||
table_path = os.path.join(self.database_name, 'huge') | ||
self.create_test_table(session, table_path) | ||
self.upsert_until_overload(session, table_path) | ||
|
||
# Check that deletion works at least first time | ||
# self.delete_test_chunk(session, 'huge', 0) | ||
self.delete_test_chunk(session, table_path, 0) | ||
# ^ uncomment after fixing https://github.com/ydb-platform/ydb/issues/13808 | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Комментарий можно убирать? |
||
|
||
# Check that deletions will lead to overflow at some moment | ||
i = self.delete_until_overload(session, 'huge') | ||
i = self.delete_until_overload(session, table_path) | ||
|
||
# Try to wait until deletion works again (after compaction) | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Напиши комментарий что это в отдельном ревью будет проверяться |
||
self.delete_test_chunk(session, 'huge', i) | ||
# Check that all DELETE statements are completed | ||
assert i == ROWS_CHUNKS_COUNT |
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.
А почему тут понадобилось значения менять?