 from ydb.tests.library.harness.kikimr_runner import KiKiMR
 from ydb.tests.library.test_meta import link_test_case
 
-ROWS_CHUNK_SIZE = 3000000
-ROWS_CHUNKS_COUNT = 100000
+ROWS_CHUNK_SIZE = 1000000
+ROWS_CHUNKS_COUNT = 50
 
 
 class TestYdbWorkload(object):
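
The smaller constants are easy to sanity-check with a rough payload estimate (a sketch, assuming ~12 bytes of raw payload per row for the Int32 key plus Uint64 value, ignoring storage overhead and compression):

    BYTES_PER_ROW = 4 + 8                          # Int32 k + Uint64 v, raw payload only
    chunk_bytes = ROWS_CHUNK_SIZE * BYTES_PER_ROW  # ~12 MB per upserted chunk
    total_bytes = ROWS_CHUNKS_COUNT * chunk_bytes  # ~600 MB across all 50 chunks
    # Comfortably above the 40 MB quota that test_delete sets below, so the fill
    # loop reliably trips the quota, while 50 iterations keep the runtime bounded.
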
@@ -26,14 +26,14 @@ def teardown_class(cls):
         cls.cluster.stop()
 
     def make_session(self):
-        driver = ydb.Driver(endpoint=f'grpc://localhost:{self.cluster.nodes[1].grpc_port}', database='/Root')
+        driver = ydb.Driver(endpoint=f'grpc://localhost:{self.cluster.nodes[1].grpc_port}', database=self.database_name)
         session = ydb.QuerySessionPool(driver)
         driver.wait(5, fail_fast=True)
         return session
 
     def create_test_table(self, session, table):
         return session.execute_with_retries(f"""
-            CREATE TABLE {table} (
+            CREATE TABLE `{table}` (
                 k Int32 NOT NULL,
                 v Uint64,
                 PRIMARY KEY (k)
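
Backtick quoting matters once tables are addressed by absolute path rather than by bare name: `/Root/test/huge` contains slashes, which only parse as a YQL identifier when quoted. A minimal sketch (the path construction mirrors test_delete below):

    table_path = os.path.join('/Root/test', 'huge')
    # The f-string then expands to: CREATE TABLE `/Root/test/huge` (...)
    # Without the backticks, the slashes would break YQL parsing.
    query = f"CREATE TABLE `{table_path}` (k Int32 NOT NULL, v Uint64, PRIMARY KEY (k))"
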
@@ -46,7 +46,7 @@ def upsert_test_chunk(self, session, table, chunk_id, retries=10):
             $values_list = ListReplicate(42ul, $n);
             $rows_list = ListFoldMap($values_list, {chunk_id * ROWS_CHUNK_SIZE}, ($val, $i) -> ((<|k:$i, v:$val|>, $i + 1)));
 
-            UPSERT INTO {table}
+            UPSERT INTO `{table}`
             SELECT * FROM AS_TABLE($rows_list);
         """, None, ydb.retries.RetrySettings(max_retries=retries))
 
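
For readers unfamiliar with YQL's ListFoldMap, a rough Python equivalent of the row generation above (a sketch; $n is assumed to be bound to ROWS_CHUNK_SIZE elsewhere in the query):

    def rows_for_chunk(chunk_id, n=ROWS_CHUNK_SIZE):
        # Mirrors ListFoldMap: thread an accumulator (the key) through the list.
        k = chunk_id * ROWS_CHUNK_SIZE
        rows = []
        for v in [42] * n:              # ListReplicate(42ul, $n)
            rows.append({'k': k, 'v': v})
            k += 1
        return rows

    # Chunk 1 thus covers keys 1000000..1999999, every row with v = 42.
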
@@ -55,12 +55,17 @@ def upsert_until_overload(self, session, table):
             for i in range(ROWS_CHUNKS_COUNT):
                 res = self.upsert_test_chunk(session, table, i, retries=0)
                 print(f"upsert #{i} ok, result:", res, file=sys.stderr)
+                described = self.cluster.client.describe('/Root', '')
+                print('Quota exceeded {}'.format(described.PathDescription.DomainDescription.DomainState.DiskQuotaExceeded), file=sys.stderr)
         except ydb.issues.Overloaded:
             print('upsert: got overload issue', file=sys.stderr)
+        except ydb.issues.Unavailable:
+            print('upsert: got unavailable issue', file=sys.stderr)
 
     @link_test_case("#13529")
     def test(self):
         """As per https://github.com/ydb-platform/ydb/issues/13529"""
+        self.database_name = '/Root'
         session = self.make_session()
 
         # Overflow the database
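
The DiskQuotaExceeded flag printed above could also be polled rather than just logged; a hypothetical helper (the name and timeout are assumptions, while the describe() call and field path are taken from the change):

    import time

    def wait_disk_quota_exceeded(cluster, timeout=60):
        # Hypothetical: poll the domain state until the quota flag flips on.
        deadline = time.time() + timeout
        while time.time() < deadline:
            described = cluster.client.describe('/Root', '')
            if described.PathDescription.DomainDescription.DomainState.DiskQuotaExceeded:
                return True
            time.sleep(1)
        return False
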
@@ -76,7 +81,7 @@ def test(self):
 
     def delete_test_chunk(self, session, table, chunk_id, retries=10):
         session.execute_with_retries(f"""
-            DELETE FROM {table}
+            DELETE FROM `{table}`
             WHERE {chunk_id * ROWS_CHUNK_SIZE} <= k AND k <= {chunk_id * ROWS_CHUNK_SIZE + ROWS_CHUNK_SIZE}
         """, None, ydb.retries.RetrySettings(max_retries=retries))
 
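
A quick worked check of the DELETE predicate (values assume ROWS_CHUNK_SIZE = 1000000):

    chunk_id = 0
    lo = chunk_id * ROWS_CHUNK_SIZE                    # 0
    hi = chunk_id * ROWS_CHUNK_SIZE + ROWS_CHUNK_SIZE  # 1000000
    # The WHERE clause is inclusive on both ends, so chunk 0 deletes keys
    # 0..1000000; the upper bound also touches the first key of chunk 1.
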
@@ -85,9 +90,13 @@ def delete_until_overload(self, session, table):
             try:
                 self.delete_test_chunk(session, table, i, retries=0)
                 print(f"delete #{i} ok", file=sys.stderr)
+            except ydb.issues.Unavailable:
+                print('delete: got unavailable issue', file=sys.stderr)
+                return i
             except ydb.issues.Overloaded:
                 print('delete: got overload issue', file=sys.stderr)
                 return i
+        return ROWS_CHUNKS_COUNT
 
     def ydbcli_db_schema_exec(self, node, operation_proto):
         endpoint = f"{node.host}:{node.port}"
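
The new fall-through return gives delete_until_overload a simple contract that the assertion in test_delete below relies on:

    # i <  ROWS_CHUNKS_COUNT -> chunk i was rejected (Overloaded or Unavailable)
    # i == ROWS_CHUNKS_COUNT -> every DELETE completed without tripping the quota
    i = self.delete_until_overload(session, table_path)
    assert i == ROWS_CHUNKS_COUNT
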
@@ -122,24 +131,37 @@ def alter_database_quotas(self, node, database_path, database_quotas):
 
     def test_delete(self):
         """As per https://github.com/ydb-platform/ydb/issues/13653"""
-        session = self.make_session()
+        self.database_name = os.path.join('/Root', 'test')
+        print('Database name {}'.format(self.database_name), file=sys.stderr)
+        self.cluster.create_database(
+            self.database_name,
+            storage_pool_units_count={
+                'hdd': 1
+            },
+        )
+        self.cluster.register_and_start_slots(self.database_name, count=1)
+        self.cluster.wait_tenant_up(self.database_name)
 
-        # Set soft and hard quotas to 6GB
-        self.alter_database_quotas(self.cluster.nodes[1], '/Root', """
-            data_size_hard_quota: 6000000000
-            data_size_soft_quota: 6000000000
+        # Set soft and hard quotas to 40 MB
+        self.alter_database_quotas(self.cluster.nodes[1], self.database_name, """
+            data_size_hard_quota: 40000000
+            data_size_soft_quota: 40000000
         """)
 
+        session = self.make_session()
+
         # Overflow the database
-        self.create_test_table(session, 'huge')
-        self.upsert_until_overload(session, 'huge')
+        table_path = os.path.join(self.database_name, 'huge')
+        self.create_test_table(session, table_path)
+        self.upsert_until_overload(session, table_path)
 
         # Check that deletion works at least the first time
-        # self.delete_test_chunk(session, 'huge', 0)
-        # ^ uncomment after fixing https://github.com/ydb-platform/ydb/issues/13808
+        self.delete_test_chunk(session, table_path, 0)
 
         # Check that deletions will lead to overflow at some moment
-        i = self.delete_until_overload(session, 'huge')
+        i = self.delete_until_overload(session, table_path)
+
+        # Check that all DELETE statements completed
+        assert i == ROWS_CHUNKS_COUNT
 
-        # Try to wait until deletion works again (after compaction)
-        self.delete_test_chunk(session, 'huge', i)
+        # Re-enabling writes after data deletion will be checked in a separate PR
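
A sketch of the deferred follow-up check mentioned in the final comment (entirely hypothetical; the describe() field path is taken from upsert_until_overload above):

    def check_writes_reenabled(self, session, table_path, timeout=120):
        # Hypothetical follow-up: after deletion, wait for DiskQuotaExceeded to
        # clear, then expect an upsert to succeed again.
        import time
        deadline = time.time() + timeout
        while time.time() < deadline:
            state = self.cluster.client.describe('/Root', '').PathDescription.DomainDescription.DomainState
            if not state.DiskQuotaExceeded:
                break
            time.sleep(1)
        self.upsert_test_chunk(session, table_path, 0)  # should no longer raise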