healthcheck merge/stable 25 1 #16023

Open · wants to merge 7 commits into stable-25-1
2 changes: 2 additions & 0 deletions ydb/core/base/appdata.cpp
@@ -70,6 +70,7 @@ struct TAppData::TImpl {
NKikimrReplication::TReplicationDefaults ReplicationConfig;
NKikimrProto::TDataIntegrityTrailsConfig DataIntegrityTrailsConfig;
NKikimrConfig::TDataErasureConfig DataErasureConfig;
NKikimrConfig::THealthCheckConfig HealthCheckConfig;
};

TAppData::TAppData(
@@ -127,6 +128,7 @@ TAppData::TAppData(
, ReplicationConfig(Impl->ReplicationConfig)
, DataIntegrityTrailsConfig(Impl->DataIntegrityTrailsConfig)
, DataErasureConfig(Impl->DataErasureConfig)
, HealthCheckConfig(Impl->HealthCheckConfig)
, KikimrShouldContinue(kikimrShouldContinue)
, TracingConfigurator(MakeIntrusive<NJaegerTracing::TSamplingThrottlingConfigurator>(TimeProvider, RandomProvider))
{}
2 changes: 2 additions & 0 deletions ydb/core/base/appdata_fwd.h
@@ -73,6 +73,7 @@ namespace NKikimrConfig {
class TMetadataCacheConfig;
class TMemoryControllerConfig;
class TFeatureFlags;
class THealthCheckConfig;
}

namespace NKikimrReplication {
@@ -242,6 +243,7 @@ struct TAppData {
NKikimrReplication::TReplicationDefaults& ReplicationConfig;
NKikimrProto::TDataIntegrityTrailsConfig& DataIntegrityTrailsConfig;
NKikimrConfig::TDataErasureConfig& DataErasureConfig;
NKikimrConfig::THealthCheckConfig& HealthCheckConfig;
bool EnforceUserTokenRequirement = false;
bool EnforceUserTokenCheckRequirement = false; // check token if it was specified
bool AllowHugeKeyValueDeletes = true; // delete when all clients limit deletes per request
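The two appdata changes above follow the established TAppData convention: the pimpl struct owns the config object, the public struct exposes a reference bound once in the constructor, and appdata_fwd.h needs only a forward declaration, keeping config.pb.h out of the header. A minimal standalone sketch of that pattern (illustrative names only, not YDB code):

```cpp
#include <memory>

// THealthCheckConfigLike stands in for NKikimrConfig::THealthCheckConfig.
struct THealthCheckConfigLike {
    unsigned TimeoutMs = 20000;
};

struct TAppDataLike {
    struct TImpl {                              // owns every config object
        THealthCheckConfigLike HealthCheckConfig;
    };
    std::unique_ptr<TImpl> Impl;
    THealthCheckConfigLike& HealthCheckConfig;  // public alias into the impl

    TAppDataLike()
        : Impl(std::make_unique<TImpl>())
        , HealthCheckConfig(Impl->HealthCheckConfig)  // bound once, as in appdata.cpp
    {}
};
```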
66 changes: 44 additions & 22 deletions ydb/core/blobstorage/ut_blobstorage/sanitize_groups.cpp
@@ -25,8 +25,8 @@ Y_UNIT_TEST_SUITE(GroupLayoutSanitizer) {
}
}

void CreateEnv(std::unique_ptr<TEnvironmentSetup>& env, std::vector<TNodeLocation>& locations) {
TBlobStorageGroupType groupType = TBlobStorageGroupType::ErasureMirror3dc;
void CreateEnv(std::unique_ptr<TEnvironmentSetup>& env, std::vector<TNodeLocation>& locations,
TBlobStorageGroupType groupType) {
const ui32 numNodes = locations.size();

env.reset(new TEnvironmentSetup({
@@ -37,39 +37,49 @@

const ui32 disksPerNode = 1;
const ui32 slotsPerDisk = 3;

env->Runtime->FilterFunction = CatchSanitizeRequests;
env->CreateBoxAndPool(disksPerNode, numNodes * disksPerNode * slotsPerDisk / 9);
env->Runtime->FilterFunction = {};
}

Y_UNIT_TEST(Test3dc) {
NActorsInterconnect::TNodeLocation LocationGenerator(ui32 dc, ui32 rack, ui32 unit) {
NActorsInterconnect::TNodeLocation proto;
proto.SetDataCenter(ToString(dc));
proto.SetRack(ToString(rack));
proto.SetUnit(ToString(unit));
return proto;
}

void Test(TBlobStorageGroupType groupType, ui32 dcs, ui32 racks, ui32 units) {
std::vector<TNodeLocation> locations;
TLocationGenerator locationGenerator = [](ui32 dc, ui32 rack, ui32 unit) {
NActorsInterconnect::TNodeLocation proto;
proto.SetDataCenter(ToString(dc));
proto.SetRack(ToString(rack));
proto.SetUnit(ToString(unit));
return proto;
};

MakeLocations(locations, 3, 5, 1, locationGenerator);
MakeLocations(locations, dcs, racks, units, LocationGenerator);
std::unique_ptr<TEnvironmentSetup> env;
CreateEnv(env, locations);

TBlobStorageGroupType groupType = TBlobStorageGroupType::ErasureMirror3dc;
TGroupGeometryInfo geom = CreateGroupGeometry(groupType);
CreateEnv(env, locations, groupType);


// Assure that the sanitizer doesn't send requests to initially allocated groups
env->Runtime->FilterFunction = CatchSanitizeRequests;
env->UpdateSettings(true, false, true);
env->Sim(TDuration::Minutes(3));
env->UpdateSettings(false, false, false);

TGroupGeometryInfo geom = CreateGroupGeometry(groupType);

TString error;
auto cfg = env->FetchBaseConfig();
UNIT_ASSERT_C(CheckBaseConfigLayout(geom, cfg, true, error), error);
env->Cleanup();

// Shuffle node locations, assure that a layout error occurs
std::random_shuffle(locations.begin(), locations.end());
env->Initialize();
env->Sim(TDuration::Seconds(100));
cfg = env->FetchBaseConfig();
CheckBaseConfigLayout(geom, cfg, true, error);
do {
env->Cleanup();
std::random_shuffle(locations.begin(), locations.end());
env->Initialize();
env->Sim(TDuration::Seconds(100));
cfg = env->FetchBaseConfig();
} while (CheckBaseConfigLayout(geom, cfg, true, error));
Cerr << error << Endl;

// Sanitize groups
@@ -86,6 +96,18 @@
UNIT_ASSERT_C(CheckBaseConfigLayout(geom, cfg, true, error), error);
}

Y_UNIT_TEST(Test3dc) {
Test(TBlobStorageGroupType::ErasureMirror3dc, 3, 5, 1);
}

Y_UNIT_TEST(TestBlock4Plus2) {
Test(TBlobStorageGroupType::Erasure4Plus2Block, 1, 10, 2);
}

Y_UNIT_TEST(TestMirror3of4) {
Test(TBlobStorageGroupType::ErasureMirror3of4, 1, 10, 2);
}

TString PrintGroups(TBlobStorageGroupType groupType, const NKikimrBlobStorage::TBaseConfig& cfg,
std::vector<TNodeLocation> locations) {
TGroupGeometryInfo geom = CreateGroupGeometry(groupType);
@@ -137,6 +159,7 @@
}

void TestMultipleRealmsOccupation(bool allowMultipleRealmsOccupation) {
TBlobStorageGroupType groupType = TBlobStorageGroupType::ErasureMirror3dc;
std::vector<TNodeLocation> locations;
TLocationGenerator locationGenerator = [](ui32 dc, ui32 rack, ui32 unit) {
NActorsInterconnect::TNodeLocation proto;
@@ -152,9 +175,8 @@
};
MakeLocations(locations, 4, 5, 1, locationGenerator);
std::unique_ptr<TEnvironmentSetup> env;
CreateEnv(env, locations);
CreateEnv(env, locations, groupType);

TBlobStorageGroupType groupType = TBlobStorageGroupType::ErasureMirror3dc;
TGroupGeometryInfo geom = CreateGroupGeometry(groupType);

env->Runtime->FilterFunction = CatchSanitizeRequests;
1 change: 1 addition & 0 deletions ydb/core/cms/console/configs_dispatcher.cpp
@@ -67,6 +67,7 @@ const THashSet<ui32> DYNAMIC_KINDS({
(ui32)NKikimrConsole::TConfigItem::BlobStorageConfigItem,
(ui32)NKikimrConsole::TConfigItem::MetadataCacheConfigItem,
(ui32)NKikimrConsole::TConfigItem::MemoryControllerConfigItem,
(ui32)NKikimrConsole::TConfigItem::HealthCheckConfigItem,
});

const THashSet<ui32> NON_YAML_KINDS({
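Adding HealthCheckConfigItem to DYNAMIC_KINDS is what makes the section updatable at runtime: kinds in this set are presumably the ones the dispatcher delivers to live subscribers, while everything else only takes effect at process start. A rough standalone illustration of that gate (simplified; the real dispatcher logic is more involved):

```cpp
#include <cstdint>
#include <unordered_set>

// Simplified stand-ins for NKikimrConsole::TConfigItem kinds.
enum EKind : uint32_t {
    MemoryControllerConfigItem,
    HealthCheckConfigItem,   // added to the dynamic set by this PR
    BootstrapConfigItem,     // example of a kind left static
};

static const std::unordered_set<uint32_t> DynamicKinds{
    MemoryControllerConfigItem,
    HealthCheckConfigItem,
};

// Only dynamic kinds reach running subscribers; the rest need a restart.
bool DeliveredAtRuntime(uint32_t kind) {
    return DynamicKinds.count(kind) != 0;
}
```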
4 changes: 4 additions & 0 deletions ydb/core/driver_lib/run/run.cpp
@@ -1205,6 +1205,10 @@ void TKikimrRunner::InitializeAppData(const TKikimrRunConfig& runConfig)
AppData->ReplicationConfig = runConfig.AppConfig.GetReplicationConfig();
}

if (runConfig.AppConfig.HasHealthCheckConfig()) {
AppData->HealthCheckConfig = runConfig.AppConfig.GetHealthCheckConfig();
}

// setup resource profiles
AppData->ResourceProfiles = new TResourceProfiles;
if (runConfig.AppConfig.GetBootstrapConfig().ResourceProfilesSize())
51 changes: 38 additions & 13 deletions ydb/core/health_check/health_check.cpp
@@ -16,6 +16,7 @@
#include <ydb/core/base/path.h>
#include <ydb/core/base/statestorage.h>
#include <ydb/core/base/tablet_pipe.h>
#include <ydb/core/cms/console/configs_dispatcher.h>
#include <ydb/core/mon/mon.h>
#include <ydb/core/base/nameservice.h>
#include <ydb/core/blobstorage/base/blobstorage_events.h>
@@ -28,6 +29,7 @@
#include <ydb/core/util/tuples.h>

#include <ydb/core/protos/blobstorage_distributed_config.pb.h>
#include <ydb/core/protos/config.pb.h>
#include <ydb/core/sys_view/common/events.h>

#include <ydb/public/api/grpc/ydb_monitoring_v1.grpc.pb.h>
@@ -121,11 +123,12 @@ class TSelfCheckRequest : public TActorBootstrapped<TSelfCheckRequest> {
ui64 Cookie;
NWilson::TSpan Span;

TSelfCheckRequest(const TActorId& sender, THolder<TEvSelfCheckRequest> request, ui64 cookie, NWilson::TTraceId&& traceId)
TSelfCheckRequest(const TActorId& sender, THolder<TEvSelfCheckRequest> request, ui64 cookie, NWilson::TTraceId&& traceId, const NKikimrConfig::THealthCheckConfig& config)
: Sender(sender)
, Request(std::move(request))
, Cookie(cookie)
, Span(TComponentTracingLevels::TTablet::Basic, std::move(traceId), "health_check", NWilson::EFlags::AUTO_END)
, HealthCheckConfig(config)
{}

using TGroupId = ui32;
@@ -163,7 +166,7 @@ class TSelfCheckRequest : public TActorBootstrapped<TSelfCheckRequest> {
struct TNodeTabletState {
struct TTabletStateSettings {
TInstant AliveBarrier;
ui32 MaxRestartsPerPeriod = 30; // per hour
ui32 MaxRestartsPerPeriod; // per hour
ui32 MaxTabletIdsStored = 10;
bool ReportGoodTabletsIds = false;
};
@@ -266,6 +269,7 @@ class TSelfCheckRequest : public TActorBootstrapped<TSelfCheckRequest> {
TString ErasureSpecies;
std::vector<const NKikimrSysView::TVSlotEntry*> VSlots;
ui32 Generation;
bool LayoutCorrect = true;
};

struct TSelfCheckResult {
@@ -647,6 +651,8 @@ class TSelfCheckRequest : public TActorBootstrapped<TSelfCheckRequest> {
std::optional<TRequestResponse<TEvStateStorage::TEvBoardInfo>> DatabaseBoardInfo;
THashSet<TNodeId> UnknownStaticGroups;

const NKikimrConfig::THealthCheckConfig& HealthCheckConfig;

std::vector<TNodeId> SubscribedNodeIds;
THashSet<TNodeId> StorageNodeIds;
THashSet<TNodeId> ComputeNodeIds;
@@ -742,7 +748,7 @@ class TSelfCheckRequest : public TActorBootstrapped<TSelfCheckRequest> {

TTabletRequestsState TabletRequests;

TDuration Timeout = TDuration::MilliSeconds(20000);
TDuration Timeout = TDuration::MilliSeconds(HealthCheckConfig.GetTimeout());
static constexpr TStringBuf STATIC_STORAGE_POOL_NAME = "static";

bool IsSpecificDatabaseFilter() const {
@@ -1504,6 +1510,7 @@ class TSelfCheckRequest : public TActorBootstrapped<TSelfCheckRequest> {
for (const auto& [hiveId, hiveResponse] : HiveInfo) {
if (hiveResponse.IsOk()) {
settings.AliveBarrier = TInstant::MilliSeconds(hiveResponse->Record.GetResponseTimestamp()) - TDuration::Minutes(5);
settings.MaxRestartsPerPeriod = HealthCheckConfig.GetThresholds().GetTabletsRestartsOrange();
for (const NKikimrHive::TTabletInfo& hiveTablet : hiveResponse->Record.GetTablets()) {
TSubDomainKey tenantId = TSubDomainKey(hiveTablet.GetObjectDomain());
auto itDomain = FilterDomainKey.find(tenantId);
@@ -1569,6 +1576,7 @@ class TSelfCheckRequest : public TActorBootstrapped<TSelfCheckRequest> {
auto& groupState = GroupState[groupId];
groupState.ErasureSpecies = group.GetInfo().GetErasureSpeciesV2();
groupState.Generation = group.GetInfo().GetGeneration();
groupState.LayoutCorrect = group.GetInfo().GetLayoutCorrect();
StoragePoolState[poolId].Groups.emplace(groupId);
}
for (const auto& vSlot : VSlots->Get()->Record.GetEntries()) {
@@ -1729,9 +1737,9 @@ class TSelfCheckRequest : public TActorBootstrapped<TSelfCheckRequest> {
FillNodeInfo(nodeId, context.Location.mutable_compute()->mutable_node());

TSelfCheckContext rrContext(&context, "NODE_UPTIME");
if (databaseState.NodeRestartsPerPeriod[nodeId] >= 30) {
if (databaseState.NodeRestartsPerPeriod[nodeId] >= HealthCheckConfig.GetThresholds().GetNodeRestartsOrange()) {
rrContext.ReportStatus(Ydb::Monitoring::StatusFlag::ORANGE, "Node is restarting too often", ETags::Uptime);
} else if (databaseState.NodeRestartsPerPeriod[nodeId] >= 10) {
} else if (databaseState.NodeRestartsPerPeriod[nodeId] >= HealthCheckConfig.GetThresholds().GetNodeRestartsYellow()) {
rrContext.ReportStatus(Ydb::Monitoring::StatusFlag::YELLOW, "The number of node restarts has increased", ETags::Uptime);
} else {
rrContext.ReportStatus(Ydb::Monitoring::StatusFlag::GREEN);
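The NODE_UPTIME check above swaps the hardcoded 10/30 restart thresholds for config lookups. A standalone sketch of the resulting classification, using the replaced literals as assumed defaults (the real defaults live in the THealthCheckConfig proto, which is not part of this diff):

```cpp
#include <cstdint>

enum class ENodeStatus { Green, Yellow, Orange };

// Mirrors the NODE_UPTIME logic; 10 and 30 are the literals the config
// fields replace, used here only as assumed defaults.
ENodeStatus ClassifyNodeRestarts(uint32_t restartsPerPeriod,
                                 uint32_t yellow = 10,
                                 uint32_t orange = 30) {
    if (restartsPerPeriod >= orange) return ENodeStatus::Orange;  // restarting too often
    if (restartsPerPeriod >= yellow) return ENodeStatus::Yellow;  // restarts increased
    return ENodeStatus::Green;
}
```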
@@ -1769,9 +1777,9 @@ class TSelfCheckRequest : public TActorBootstrapped<TSelfCheckRequest> {
long timeDifferenceUs = nodeSystemState.GetMaxClockSkewWithPeerUs();
TDuration timeDifferenceDuration = TDuration::MicroSeconds(abs(timeDifferenceUs));
Ydb::Monitoring::StatusFlag::Status status;
if (timeDifferenceDuration > MAX_CLOCKSKEW_ORANGE_ISSUE_TIME) {
if (timeDifferenceDuration > TDuration::MicroSeconds(HealthCheckConfig.GetThresholds().GetNodesTimeDifferenceOrange())) {
status = Ydb::Monitoring::StatusFlag::ORANGE;
} else if (timeDifferenceDuration > MAX_CLOCKSKEW_YELLOW_ISSUE_TIME) {
} else if (timeDifferenceDuration > TDuration::MicroSeconds(HealthCheckConfig.GetThresholds().GetNodesTimeDifferenceYellow())) {
status = Ydb::Monitoring::StatusFlag::YELLOW;
} else {
status = Ydb::Monitoring::StatusFlag::GREEN;
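The clock-skew check gets the same treatment: the MAX_CLOCKSKEW_* constants removed further down (25000 us orange, 5000 us yellow) become the NodesTimeDifferenceOrange/Yellow thresholds, still expressed in microseconds. A standalone sketch with those values as assumed defaults:

```cpp
#include <cstdint>
#include <cstdlib>

enum class ESkewStatus { Green, Yellow, Orange };

// Skew is compared by absolute value, as in the diff; 5000 us and 25000 us
// are the removed MAX_CLOCKSKEW_* constants, assumed to be the proto defaults.
ESkewStatus ClassifyClockSkew(int64_t skewUs,
                              uint64_t yellowUs = 5000,
                              uint64_t orangeUs = 25000) {
    const uint64_t absSkew = static_cast<uint64_t>(std::llabs(skewUs));
    if (absSkew > orangeUs) return ESkewStatus::Orange;
    if (absSkew > yellowUs) return ESkewStatus::Yellow;
    return ESkewStatus::Green;
}
```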
@@ -2343,6 +2351,7 @@ class TSelfCheckRequest : public TActorBootstrapped<TSelfCheckRequest> {

class TGroupChecker {
TString ErasureSpecies;
bool LayoutCorrect;
int FailedDisks = 0;
std::array<int, Ydb::Monitoring::StatusFlag::Status_ARRAYSIZE> DisksColors = {};
TStackVec<std::pair<ui32, int>> FailedRealms;
@@ -2359,7 +2368,10 @@
}

public:
TGroupChecker(const TString& erasure) : ErasureSpecies(erasure) {}
TGroupChecker(const TString& erasure, const bool layoutCorrect = true)
: ErasureSpecies(erasure)
, LayoutCorrect(layoutCorrect)
{}

void AddVDiskStatus(Ydb::Monitoring::StatusFlag::Status status, ui32 realm) {
++DisksColors[status];
@@ -2378,6 +2390,9 @@

void ReportStatus(TSelfCheckContext& context) const {
context.OverallStatus = Ydb::Monitoring::StatusFlag::GREEN;
if (!LayoutCorrect) {
context.ReportStatus(Ydb::Monitoring::StatusFlag::ORANGE, "Group layout is incorrect", ETags::GroupState);
}
if (ErasureSpecies == NONE) {
if (FailedDisks > 0) {
context.ReportStatus(Ydb::Monitoring::StatusFlag::RED, "Group failed", ETags::GroupState, {ETags::VDiskState});
@@ -2727,7 +2742,7 @@ class TSelfCheckRequest : public TActorBootstrapped<TSelfCheckRequest> {
return;
}

TGroupChecker checker(itGroup->second.ErasureSpecies);
TGroupChecker checker(itGroup->second.ErasureSpecies, itGroup->second.LayoutCorrect);
const auto& slots = itGroup->second.VSlots;
for (const auto* slot : slots) {
const auto& slotInfo = slot->GetInfo();
@@ -2921,9 +2936,6 @@ class TSelfCheckRequest : public TActorBootstrapped<TSelfCheckRequest> {
}
}

const TDuration MAX_CLOCKSKEW_ORANGE_ISSUE_TIME = TDuration::MicroSeconds(25000);
const TDuration MAX_CLOCKSKEW_YELLOW_ISSUE_TIME = TDuration::MicroSeconds(5000);

void FillResult(TOverallStateContext context) {
if (IsSpecificDatabaseFilter()) {
FillDatabaseResult(context, FilterDatabase, DatabaseState[FilterDatabase]);
@@ -3252,12 +3264,16 @@ void TNodeCheckRequest<NMon::TEvHttpInfo>::Bootstrap() {
class THealthCheckService : public TActorBootstrapped<THealthCheckService> {
public:
static constexpr NKikimrServices::TActivity::EType ActorActivityType() { return NKikimrServices::TActivity::MONITORING_SERVICE; }
NKikimrConfig::THealthCheckConfig HealthCheckConfig;

THealthCheckService()
{
}

void Bootstrap() {
HealthCheckConfig.CopyFrom(AppData()->HealthCheckConfig);
Send(NConsole::MakeConfigsDispatcherID(SelfId().NodeId()),
new NConsole::TEvConfigsDispatcher::TEvSetConfigSubscriptionRequest({NKikimrConsole::TConfigItem::HealthCheckConfigItem}));
TMon* mon = AppData()->Mon;
if (mon) {
mon->RegisterActorPage({
@@ -3270,8 +3286,16 @@
Become(&THealthCheckService::StateWork);
}

void Handle(NConsole::TEvConsole::TEvConfigNotificationRequest::TPtr& ev) {
const auto& record = ev->Get()->Record;
if (record.GetConfig().HasHealthCheckConfig()) {
HealthCheckConfig.CopyFrom(record.GetConfig().GetHealthCheckConfig());
}
Send(ev->Sender, new NConsole::TEvConsole::TEvConfigNotificationResponse(record), 0, ev->Cookie);
}

void Handle(TEvSelfCheckRequest::TPtr& ev) {
Register(new TSelfCheckRequest(ev->Sender, ev.Get()->Release(), ev->Cookie, std::move(ev->TraceId)));
Register(new TSelfCheckRequest(ev->Sender, ev.Get()->Release(), ev->Cookie, std::move(ev->TraceId), HealthCheckConfig));
}

std::shared_ptr<NYdbGrpc::TGRpcClientLow> GRpcClientLow;
@@ -3299,6 +3323,7 @@ class THealthCheckService : public TActorBootstrapped<THealthCheckService> {
hFunc(TEvSelfCheckRequest, Handle);
hFunc(TEvNodeCheckRequest, Handle);
hFunc(NMon::TEvHttpInfo, Handle);
hFunc(NConsole::TEvConsole::TEvConfigNotificationRequest, Handle);
cFunc(TEvents::TSystem::PoisonPill, PassAway);
}
}
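Taken together, the service-side changes form a small refresh loop: Bootstrap seeds the local HealthCheckConfig from AppData and subscribes to HealthCheckConfigItem, every console notification overwrites the copy and is acknowledged, and each new TSelfCheckRequest actor receives the current config at construction. A framework-free sketch of that shape (illustrative only, not the actor code):

```cpp
#include <functional>

struct TConfig {
    unsigned TimeoutMs = 20000;
};

// Outline of THealthCheckService's config handling: seed once, overwrite on
// every notification, hand the current value to each request spawned after.
class TServiceLike {
    TConfig Config;  // local copy, seeded at startup
public:
    explicit TServiceLike(const TConfig& seed) : Config(seed) {}

    void OnConfigNotification(const TConfig& updated) {
        Config = updated;  // the real handler also sends an acknowledgement
    }

    void OnSelfCheckRequest(const std::function<void(const TConfig&)>& run) {
        run(Config);       // every request sees the latest configuration
    }
};
```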