-
Notifications
You must be signed in to change notification settings - Fork 998
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
chore: Add stats print for slot migrations #4456
Changes from 4 commits
5296899
af30b4f
cdadbbe
f172f10
a76befd
3ef9827
422f175
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -224,6 +224,7 @@ void RestoreStreamer::Run() { | |
auto* blocking_counter = db_slice_->BlockingCounter(); | ||
std::lock_guard blocking_counter_guard(*blocking_counter); | ||
|
||
stats_.buckets_loop++; | ||
WriteBucket(it); | ||
}); | ||
|
||
|
@@ -232,10 +233,19 @@ void RestoreStreamer::Run() { | |
last_yield = 0; | ||
} | ||
} while (cursor); | ||
|
||
VLOG(1) << "RestoreStreamer finished loop of " << my_slots_.ToSlotRanges().ToString() | ||
<< ", shard " << db_slice_->shard_id() << ". Buckets looped " << stats_.buckets_loop; | ||
} | ||
|
||
void RestoreStreamer::SendFinalize(long attempt) { | ||
VLOG(1) << "RestoreStreamer LSN opcode for : " << db_slice_->shard_id() << " attempt " << attempt; | ||
VLOG(1) << "RestoreStreamer LSN of " << my_slots_.ToSlotRanges().ToString() << ", shard " | ||
<< db_slice_->shard_id() << " attempt " << attempt << " with " << stats_.commands | ||
<< " commands. Buckets looped " << stats_.buckets_loop << ", buckets on_db_update " | ||
<< stats_.buckets_on_db_update << ", buckets skipped " << stats_.buckets_skipped | ||
<< ", buckets written " << stats_.buckets_written << ". Keys skipped " | ||
<< stats_.keys_skipped << ", keys written " << stats_.keys_written; | ||
|
||
journal::Entry entry(journal::Op::LSN, attempt); | ||
|
||
io::StringSink sink; | ||
|
@@ -287,21 +297,28 @@ bool RestoreStreamer::ShouldWrite(SlotId slot_id) const { | |
|
||
void RestoreStreamer::WriteBucket(PrimeTable::bucket_iterator it) { | ||
if (it.GetVersion() < snapshot_version_) { | ||
stats_.buckets_written++; | ||
|
||
it.SetVersion(snapshot_version_); | ||
string key_buffer; // we can reuse it | ||
for (; !it.is_done(); ++it) { | ||
const auto& pv = it->second; | ||
string_view key = it->first.GetSlice(&key_buffer); | ||
if (ShouldWrite(key)) { | ||
stats_.keys_written++; | ||
uint64_t expire = 0; | ||
if (pv.HasExpire()) { | ||
auto eit = db_slice_->databases()[0]->expire.Find(it->first); | ||
expire = db_slice_->ExpireTime(eit); | ||
} | ||
|
||
WriteEntry(key, it->first, pv, expire); | ||
} else { | ||
stats_.keys_skipped++; | ||
} | ||
} | ||
} else { | ||
stats_.buckets_skipped++; | ||
} | ||
ThrottleIfNeeded(); | ||
} | ||
|
@@ -310,6 +327,8 @@ void RestoreStreamer::OnDbChange(DbIndex db_index, const DbSlice::ChangeReq& req | |
std::lock_guard guard(big_value_mu_); | ||
DCHECK_EQ(db_index, 0) << "Restore migration only allowed in cluster mode in db0"; | ||
|
||
stats_.buckets_on_db_update++; | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I think that it will be better to count actually how many buckets where serialized on ondbchange i,e Write bucket will return true if the bucket was serialized false if skipped There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. To clarify, you want this in order to handle potential CVCUponInsert() handling multiple buckets? If so we can increase it there, if not, I think I did not understand your comment There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I want to understand that buckets where actually serialized from this flow and not only the command was called. Because if it is called after the bucket is serialized than we dont do anything (skip write bucket) There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. oh gotcha, sure thing! |
||
|
||
PrimeTable* table = db_slice_->GetTables(0).first; | ||
|
||
if (const PrimeTable::bucket_iterator* bit = req.update()) { | ||
|
@@ -331,7 +350,7 @@ void RestoreStreamer::WriteEntry(string_view key, const PrimeValue& pk, const Pr | |
ThrottleIfNeeded(); | ||
}, | ||
ServerState::tlocal()->serialization_max_chunk_size); | ||
serializer.SerializeEntry(key, pk, pv, expire_ms); | ||
stats_.commands += serializer.SerializeEntry(key, pk, pv, expire_ms); | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Is this actually counting the number of flushes to sync that we have, right (the number of times you call commit)? Not the number of commands generated from each entry. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Excluding timeout and stickiness they should be the same There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Ok, I see now — so this is not actually the number of times we flush to sync but the number of times we write to the pending_buffer. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. yes |
||
} | ||
|
||
} // namespace dfly |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
After diving into the code again, regarding your comment above that flushes are the same as the number of commands: this is not true. We are actually accumulating the commands. You don't know if the data was actually written to the socket
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
renamed to
commands