Skip to content

8238687: Investigate memory uncommit during young collections in G1 #25832

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 4 commits into
base: master
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion src/hotspot/share/gc/g1/g1Analytics.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -110,7 +110,7 @@ class G1Analytics: public CHeapObj<mtGC> {
return _short_term_pause_time_ratio;
}

uint number_of_recorded_pause_times() const {
uint max_num_of_recorded_pause_times() const {
return NumPrevPausesForHeuristics;
}

Expand Down
4 changes: 2 additions & 2 deletions src/hotspot/share/gc/g1/g1Arguments.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -196,8 +196,8 @@ void G1Arguments::initialize() {
if (FLAG_IS_DEFAULT(GCTimeRatio) || GCTimeRatio == 0) {
// In G1, we want the default GC overhead goal to be higher than
// it is for PS, or the heap might be expanded too aggressively.
// We set it here to ~8%.
FLAG_SET_DEFAULT(GCTimeRatio, 12);
// We set it here to 4%.
FLAG_SET_DEFAULT(GCTimeRatio, 24);
}

// Below, we might need to calculate the pause time interval based on
Expand Down
125 changes: 83 additions & 42 deletions src/hotspot/share/gc/g1/g1CollectedHeap.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -795,8 +795,7 @@ void G1CollectedHeap::prepare_for_mutator_after_full_collection(size_t allocatio
assert(num_free_regions() == 0, "we should not have added any free regions");
rebuild_region_sets(false /* free_list_only */);
abort_refinement();
resize_heap_if_necessary(allocation_word_size);
uncommit_regions_if_necessary();
resize_heap_after_full_collection(allocation_word_size);

// Rebuild the code root lists for each region
rebuild_code_roots();
Expand Down Expand Up @@ -879,21 +878,41 @@ void G1CollectedHeap::upgrade_to_full_collection() {
size_t(0) /* allocation_word_size */);
}

void G1CollectedHeap::resize_heap_if_necessary(size_t allocation_word_size) {

void G1CollectedHeap::resize_heap(size_t resize_bytes, bool should_expand) {
if (should_expand) {
expand(resize_bytes, _workers);
} else {
shrink(resize_bytes);
uncommit_regions_if_necessary();
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I wonder if it's more symmetric if this uncommit logic is inlined to shrink.

}
}

void G1CollectedHeap::resize_heap_after_full_collection(size_t allocation_word_size) {
assert_at_safepoint_on_vm_thread();

bool should_expand;
size_t resize_amount = _heap_sizing_policy->full_collection_resize_amount(should_expand, allocation_word_size);
size_t resize_bytes = _heap_sizing_policy->full_collection_resize_amount(should_expand, allocation_word_size);

if (resize_amount == 0) {
return;
} else if (should_expand) {
expand(resize_amount, _workers);
} else {
shrink(resize_amount);
if (resize_bytes != 0) {
resize_heap(resize_bytes, should_expand);
}
}

// Resize the heap after a young collection: ask the heap sizing policy for a
// resize amount and direction, apply it via resize_heap(), and record the
// elapsed time for GC phase-time reporting.
void G1CollectedHeap::resize_heap_after_young_collection(size_t allocation_word_size) {
Ticks start = Ticks::now();

// Set by the policy call below to indicate expand (true) vs shrink (false).
bool should_expand;

size_t resize_bytes = _heap_sizing_policy->young_collection_resize_amount(should_expand, allocation_word_size);

// A zero recommendation means no resize is needed this cycle.
if (resize_bytes != 0) {
resize_heap(resize_bytes, should_expand);
}

// Convert seconds to milliseconds for the phase times record.
phase_times()->record_resize_heap_time((Ticks::now() - start).seconds() * 1000.0);
}

HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size,
bool do_gc,
bool maximal_compaction,
Expand Down Expand Up @@ -1005,18 +1024,21 @@ bool G1CollectedHeap::expand(size_t expand_bytes, WorkerThreads* pretouch_worker
size_t aligned_expand_bytes = os::align_up_vm_page_size(expand_bytes);
aligned_expand_bytes = align_up(aligned_expand_bytes, G1HeapRegion::GrainBytes);

log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: %zuB expansion amount: %zuB",
expand_bytes, aligned_expand_bytes);
uint num_regions_to_expand = (uint)(aligned_expand_bytes / G1HeapRegion::GrainBytes);
assert(num_regions_to_expand > 0, "Must expand by at least one region");

log_debug(gc, ergo, heap)("Heap resize. Requested expansion amount: %zuB aligned expansion amount: %zuB (%u regions)",
expand_bytes, aligned_expand_bytes, num_regions_to_expand);

if (num_inactive_regions() == 0) {
log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)");
log_debug(gc, ergo, heap)("Heap resize. Did not expand the heap (heap already fully expanded)");
return false;
}

uint regions_to_expand = (uint)(aligned_expand_bytes / G1HeapRegion::GrainBytes);
assert(regions_to_expand > 0, "Must expand by at least one region");

uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers);

uint expanded_by = _hrm.expand_by(num_regions_to_expand, pretouch_workers);

assert(expanded_by > 0, "must have failed during commit.");

size_t actual_expand_bytes = expanded_by * G1HeapRegion::GrainBytes;
Expand All @@ -1040,24 +1062,45 @@ bool G1CollectedHeap::expand_single_region(uint node_index) {
}

void G1CollectedHeap::shrink_helper(size_t shrink_bytes) {
size_t aligned_shrink_bytes = os::align_down_vm_page_size(shrink_bytes);
aligned_shrink_bytes = align_down(aligned_shrink_bytes, G1HeapRegion::GrainBytes);
assert(shrink_bytes > 0, "must be");
assert(is_aligned(shrink_bytes, G1HeapRegion::GrainBytes),
"Shrink request for %zuB not aligned to heap region size %zuB",
shrink_bytes, G1HeapRegion::GrainBytes);

uint num_regions_to_remove = (uint)(shrink_bytes / G1HeapRegion::GrainBytes);

uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove);
size_t shrunk_bytes = num_regions_removed * G1HeapRegion::GrainBytes;

log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: %zuB aligned shrinking amount: %zuB actual amount shrunk: %zuB",
shrink_bytes, aligned_shrink_bytes, shrunk_bytes);
log_debug(gc, ergo, heap)("Heap resize. Requested shrinking amount: %zuB actual shrinking amount: %zuB (%u regions)",
shrink_bytes, shrunk_bytes, num_regions_removed);
if (num_regions_removed > 0) {
log_debug(gc, heap)("Uncommittable regions after shrink: %u", num_regions_removed);
policy()->record_new_heap_size(num_committed_regions());
} else {
log_debug(gc, ergo, heap)("Did not shrink the heap (heap shrinking operation failed)");
log_debug(gc, ergo, heap)("Heap resize. Did not shrink the heap (heap shrinking operation failed)");
}
}

void G1CollectedHeap::shrink(size_t shrink_bytes) {
if (capacity() == min_capacity()) {
log_debug(gc, ergo, heap)("Heap resize. Did not shrink the heap (heap already at minimum)");
return;
}

size_t aligned_shrink_bytes = os::align_down_vm_page_size(shrink_bytes);
aligned_shrink_bytes = align_down(aligned_shrink_bytes, G1HeapRegion::GrainBytes);

aligned_shrink_bytes = capacity() - MAX2(capacity() - aligned_shrink_bytes, min_capacity());
assert(is_aligned(aligned_shrink_bytes, G1HeapRegion::GrainBytes), "Bytes to shrink %zuB not aligned", aligned_shrink_bytes);

log_debug(gc, ergo, heap)("Heap resize. Requested shrink amount: %zuB aligned shrink amount: %zuB",
shrink_bytes, aligned_shrink_bytes);

if (aligned_shrink_bytes == 0) {
log_debug(gc, ergo, heap)("Heap resize. Did not shrink the heap (shrink request too small)");
return;
}

_verifier->verify_region_sets_optional();

// We should only reach here at the end of a Full GC or during Remark which
Expand All @@ -1069,7 +1112,7 @@ void G1CollectedHeap::shrink(size_t shrink_bytes) {
// could instead use the remove_all_pending() method on free_list to
// remove only the ones that we need to remove.
_hrm.remove_all_free_regions();
shrink_helper(shrink_bytes);
shrink_helper(aligned_shrink_bytes);
rebuild_region_sets(true /* free_list_only */);

_hrm.verify_optional();
Expand Down Expand Up @@ -1335,7 +1378,7 @@ jint G1CollectedHeap::initialize() {
}

os::trace_page_sizes("Heap",
MinHeapSize,
min_capacity(),
reserved_byte_size,
heap_rs.base(),
heap_rs.size(),
Expand Down Expand Up @@ -2021,7 +2064,7 @@ bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const {
}

size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const {
return (_policy->young_list_target_length() - _survivor.length()) * G1HeapRegion::GrainBytes;
return eden_target_length() * G1HeapRegion::GrainBytes;
}

size_t G1CollectedHeap::tlab_used(Thread* ignored) const {
Expand All @@ -2042,6 +2085,10 @@ size_t G1CollectedHeap::max_capacity() const {
return max_num_regions() * G1HeapRegion::GrainBytes;
}

// Minimum heap capacity in bytes; simply reports the MinHeapSize setting,
// which is the lower bound the heap may shrink to.
size_t G1CollectedHeap::min_capacity() const {
return MinHeapSize;
}

void G1CollectedHeap::prepare_for_verify() {
_verifier->prepare_for_verify();
}
Expand Down Expand Up @@ -2387,24 +2434,11 @@ void G1CollectedHeap::verify_after_young_collection(G1HeapVerifier::G1VerifyType
phase_times()->record_verify_after_time_ms((Ticks::now() - start).seconds() * MILLIUNITS);
}

void G1CollectedHeap::expand_heap_after_young_collection(){
size_t expand_bytes = _heap_sizing_policy->young_collection_expansion_amount();
if (expand_bytes > 0) {
// No need for an ergo logging here,
// expansion_amount() does this when it returns a value > 0.
Ticks expand_start = Ticks::now();
if (expand(expand_bytes, _workers)) {
double expand_ms = (Ticks::now() - expand_start).seconds() * MILLIUNITS;
phase_times()->record_expand_heap_time(expand_ms);
}
}
}

void G1CollectedHeap::do_collection_pause_at_safepoint() {
void G1CollectedHeap::do_collection_pause_at_safepoint(size_t allocation_word_size) {
assert_at_safepoint_on_vm_thread();
guarantee(!is_stw_gc_active(), "collection is not reentrant");

do_collection_pause_at_safepoint_helper();
do_collection_pause_at_safepoint_helper(allocation_word_size);
}

G1HeapPrinterMark::G1HeapPrinterMark(G1CollectedHeap* g1h) : _g1h(g1h), _heap_transition(g1h) {
Expand Down Expand Up @@ -2468,7 +2502,7 @@ void G1CollectedHeap::flush_region_pin_cache() {
}
}

void G1CollectedHeap::do_collection_pause_at_safepoint_helper() {
void G1CollectedHeap::do_collection_pause_at_safepoint_helper(size_t allocation_word_size) {
ResourceMark rm;

IsSTWGCActiveMark active_gc_mark;
Expand All @@ -2486,7 +2520,7 @@ void G1CollectedHeap::do_collection_pause_at_safepoint_helper() {
bool should_start_concurrent_mark_operation = collector_state()->in_concurrent_start_gc();

// Perform the collection.
G1YoungCollector collector(gc_cause());
G1YoungCollector collector(gc_cause(), allocation_word_size);
collector.collect();

// It should now be safe to tell the concurrent mark thread to start
Expand Down Expand Up @@ -2608,6 +2642,13 @@ void G1CollectedHeap::set_young_gen_card_set_stats(const G1MonotonicArenaMemoryS

void G1CollectedHeap::record_obj_copy_mem_stats() {
size_t total_old_allocated = _old_evac_stats.allocated() + _old_evac_stats.direct_allocated();
uint total_allocated = _survivor_evac_stats.regions_filled() + _old_evac_stats.regions_filled();

log_debug(gc)("Allocated %u survivor %u old percent total %1.2f%% (%u%%)",
_survivor_evac_stats.regions_filled(), _old_evac_stats.regions_filled(),
percent_of(total_allocated, num_committed_regions() - total_allocated),
G1ReservePercent);

policy()->old_gen_alloc_tracker()->
add_allocated_bytes_since_last_gc(total_old_allocated * HeapWordSize);

Expand Down
14 changes: 9 additions & 5 deletions src/hotspot/share/gc/g1/g1CollectedHeap.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -558,7 +558,9 @@ class G1CollectedHeap : public CollectedHeap {
void pin_object(JavaThread* thread, oop obj) override;
void unpin_object(JavaThread* thread, oop obj) override;

void resize_heap_if_necessary(size_t allocation_word_size);
void resize_heap_after_young_collection(size_t allocation_word_size);
void resize_heap_after_full_collection(size_t allocation_word_size);
void resize_heap(size_t resize_bytes, bool should_expand);

// Check if there is memory to uncommit and if so schedule a task to do it.
void uncommit_regions_if_necessary();
Expand Down Expand Up @@ -743,11 +745,11 @@ class G1CollectedHeap : public CollectedHeap {
// followed by a by-policy upgrade to a full collection.
// precondition: at safepoint on VM thread
// precondition: !is_stw_gc_active()
void do_collection_pause_at_safepoint();
void do_collection_pause_at_safepoint(size_t allocation_word_size = 0);

// Helper for do_collection_pause_at_safepoint, containing the guts
// of the incremental collection pause, executed by the vm thread.
void do_collection_pause_at_safepoint_helper();
void do_collection_pause_at_safepoint_helper(size_t allocation_word_size);

void verify_before_young_collection(G1HeapVerifier::G1VerifyType type);
void verify_after_young_collection(G1HeapVerifier::G1VerifyType type);
Expand All @@ -764,8 +766,6 @@ class G1CollectedHeap : public CollectedHeap {
// Must be called before any decision based on pin counts.
void flush_region_pin_cache();

void expand_heap_after_young_collection();
// Update object copying statistics.
void record_obj_copy_mem_stats();

private:
Expand Down Expand Up @@ -1022,6 +1022,8 @@ class G1CollectedHeap : public CollectedHeap {

void start_concurrent_gc_for_metadata_allocation(GCCause::Cause gc_cause);

// Returns true if the most recent GC was triggered by G1's periodic
// collection mechanism (checks the recorded last GC cause).
bool last_gc_was_periodic() { return _gc_lastcause == GCCause::_g1_periodic_collection; }

void remove_from_old_gen_sets(const uint old_regions_removed,
const uint humongous_regions_removed);
void prepend_to_freelist(G1FreeRegionList* list);
Expand Down Expand Up @@ -1190,6 +1192,7 @@ class G1CollectedHeap : public CollectedHeap {

// Print the maximum heap capacity.
size_t max_capacity() const override;
size_t min_capacity() const;

Tickspan time_since_last_collection() const { return Ticks::now() - _collection_pause_end; }

Expand All @@ -1204,6 +1207,7 @@ class G1CollectedHeap : public CollectedHeap {

G1SurvivorRegions* survivor() { return &_survivor; }

inline uint eden_target_length() const;
uint eden_regions_count() const { return _eden.length(); }
uint eden_regions_count(uint node_index) const { return _eden.regions_on_node(node_index); }
uint survivor_regions_count() const { return _survivor.length(); }
Expand Down
6 changes: 5 additions & 1 deletion src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
Expand Down Expand Up @@ -316,4 +316,8 @@ inline bool G1CollectedHeap::is_collection_set_candidate(const G1HeapRegion* r)
return candidates->contains(r);
}

// Target number of eden regions: the policy's young-list target minus the
// regions already occupied by survivors.
// NOTE(review): assumes survivor_regions_count() <= young_list_target_length();
// a larger survivor count would underflow the unsigned subtraction — confirm
// the policy guarantees this invariant.
inline uint G1CollectedHeap::eden_target_length() const {
return _policy->young_list_target_length() - survivor_regions_count();
}

#endif // SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP
5 changes: 3 additions & 2 deletions src/hotspot/share/gc/g1/g1ConcurrentMark.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1461,8 +1461,9 @@ void G1ConcurrentMark::remark() {
// GC pause.
_g1h->increment_total_collections();

_g1h->resize_heap_if_necessary(size_t(0) /* allocation_word_size */);
_g1h->uncommit_regions_if_necessary();
if (_g1h->last_gc_was_periodic()) {
_g1h->resize_heap_after_full_collection(size_t(0) /* allocation_word_size */);
}

compute_new_sizes();

Expand Down
6 changes: 3 additions & 3 deletions src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -178,7 +178,7 @@ void G1GCPhaseTimes::reset() {
_cur_pre_evacuate_prepare_time_ms = 0.0;
_cur_post_evacuate_cleanup_1_time_ms = 0.0;
_cur_post_evacuate_cleanup_2_time_ms = 0.0;
_cur_expand_heap_time_ms = 0.0;
_cur_resize_heap_time_ms = 0.0;
_cur_ref_proc_time_ms = 0.0;
_cur_collection_start_sec = 0.0;
_root_region_scan_wait_time_ms = 0.0;
Expand Down Expand Up @@ -488,7 +488,7 @@ double G1GCPhaseTimes::print_post_evacuate_collection_set(bool evacuation_failed
_cur_post_evacuate_cleanup_2_time_ms +
_recorded_total_rebuild_freelist_time_ms +
_recorded_prepare_for_mutator_time_ms +
_cur_expand_heap_time_ms;
_cur_resize_heap_time_ms;

info_time("Post Evacuate Collection Set", sum_ms);

Expand Down Expand Up @@ -537,7 +537,7 @@ double G1GCPhaseTimes::print_post_evacuate_collection_set(bool evacuation_failed
trace_phase(_gc_par_phases[RebuildFreeList]);

debug_time("Prepare For Mutator", _recorded_prepare_for_mutator_time_ms);
debug_time("Expand Heap After Collection", _cur_expand_heap_time_ms);
debug_time("Resize Heap After Collection", _cur_resize_heap_time_ms);

return sum_ms;
}
Expand Down
Loading