diff --git a/src/hotspot/share/gc/g1/g1Analytics.hpp b/src/hotspot/share/gc/g1/g1Analytics.hpp index d4c6463b486ab..d0e1b129b8644 100644 --- a/src/hotspot/share/gc/g1/g1Analytics.hpp +++ b/src/hotspot/share/gc/g1/g1Analytics.hpp @@ -110,7 +110,7 @@ class G1Analytics: public CHeapObj { return _short_term_pause_time_ratio; } - uint number_of_recorded_pause_times() const { + uint max_num_of_recorded_pause_times() const { return NumPrevPausesForHeuristics; } diff --git a/src/hotspot/share/gc/g1/g1Arguments.cpp b/src/hotspot/share/gc/g1/g1Arguments.cpp index bd156a69fe6fc..ee91c3273375f 100644 --- a/src/hotspot/share/gc/g1/g1Arguments.cpp +++ b/src/hotspot/share/gc/g1/g1Arguments.cpp @@ -196,8 +196,8 @@ void G1Arguments::initialize() { if (FLAG_IS_DEFAULT(GCTimeRatio) || GCTimeRatio == 0) { // In G1, we want the default GC overhead goal to be higher than // it is for PS, or the heap might be expanded too aggressively. - // We set it here to ~8%. - FLAG_SET_DEFAULT(GCTimeRatio, 12); + // We set it here to 4%. 
+ FLAG_SET_DEFAULT(GCTimeRatio, 24); } // Below, we might need to calculate the pause time interval based on diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp index b6c18420b82fc..8e0ec4af7ec87 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp @@ -795,8 +795,7 @@ void G1CollectedHeap::prepare_for_mutator_after_full_collection(size_t allocatio assert(num_free_regions() == 0, "we should not have added any free regions"); rebuild_region_sets(false /* free_list_only */); abort_refinement(); - resize_heap_if_necessary(allocation_word_size); - uncommit_regions_if_necessary(); + resize_heap_after_full_collection(allocation_word_size); // Rebuild the code root lists for each region rebuild_code_roots(); @@ -879,21 +878,41 @@ void G1CollectedHeap::upgrade_to_full_collection() { size_t(0) /* allocation_word_size */); } -void G1CollectedHeap::resize_heap_if_necessary(size_t allocation_word_size) { + +void G1CollectedHeap::resize_heap(size_t resize_bytes, bool should_expand) { + if (should_expand) { + expand(resize_bytes, _workers); + } else { + shrink(resize_bytes); + uncommit_regions_if_necessary(); + } +} + +void G1CollectedHeap::resize_heap_after_full_collection(size_t allocation_word_size) { assert_at_safepoint_on_vm_thread(); bool should_expand; - size_t resize_amount = _heap_sizing_policy->full_collection_resize_amount(should_expand, allocation_word_size); + size_t resize_bytes = _heap_sizing_policy->full_collection_resize_amount(should_expand, allocation_word_size); - if (resize_amount == 0) { - return; - } else if (should_expand) { - expand(resize_amount, _workers); - } else { - shrink(resize_amount); + if (resize_bytes != 0) { + resize_heap(resize_bytes, should_expand); } } +void G1CollectedHeap::resize_heap_after_young_collection(size_t allocation_word_size) { + Ticks start = Ticks::now(); + + bool should_expand; + + size_t resize_bytes = 
_heap_sizing_policy->young_collection_resize_amount(should_expand, allocation_word_size); + + if (resize_bytes != 0) { + resize_heap(resize_bytes, should_expand); + } + + phase_times()->record_resize_heap_time((Ticks::now() - start).seconds() * 1000.0); +} + HeapWord* G1CollectedHeap::satisfy_failed_allocation_helper(size_t word_size, bool do_gc, bool maximal_compaction, @@ -1005,18 +1024,21 @@ bool G1CollectedHeap::expand(size_t expand_bytes, WorkerThreads* pretouch_worker size_t aligned_expand_bytes = os::align_up_vm_page_size(expand_bytes); aligned_expand_bytes = align_up(aligned_expand_bytes, G1HeapRegion::GrainBytes); - log_debug(gc, ergo, heap)("Expand the heap. requested expansion amount: %zuB expansion amount: %zuB", - expand_bytes, aligned_expand_bytes); + uint num_regions_to_expand = (uint)(aligned_expand_bytes / G1HeapRegion::GrainBytes); + assert(num_regions_to_expand > 0, "Must expand by at least one region"); + + log_debug(gc, ergo, heap)("Heap resize. Requested expansion amount: %zuB aligned expansion amount: %zuB (%u regions)", + expand_bytes, aligned_expand_bytes, num_regions_to_expand); if (num_inactive_regions() == 0) { - log_debug(gc, ergo, heap)("Did not expand the heap (heap already fully expanded)"); + log_debug(gc, ergo, heap)("Heap resize. 
Did not expand the heap (heap already fully expanded)"); return false; } - uint regions_to_expand = (uint)(aligned_expand_bytes / G1HeapRegion::GrainBytes); - assert(regions_to_expand > 0, "Must expand by at least one region"); - uint expanded_by = _hrm.expand_by(regions_to_expand, pretouch_workers); + + uint expanded_by = _hrm.expand_by(num_regions_to_expand, pretouch_workers); + assert(expanded_by > 0, "must have failed during commit."); size_t actual_expand_bytes = expanded_by * G1HeapRegion::GrainBytes; @@ -1040,24 +1062,45 @@ bool G1CollectedHeap::expand_single_region(uint node_index) { } void G1CollectedHeap::shrink_helper(size_t shrink_bytes) { - size_t aligned_shrink_bytes = os::align_down_vm_page_size(shrink_bytes); - aligned_shrink_bytes = align_down(aligned_shrink_bytes, G1HeapRegion::GrainBytes); + assert(shrink_bytes > 0, "must be"); + assert(is_aligned(shrink_bytes, G1HeapRegion::GrainBytes), + "Shrink request for %zuB not aligned to heap region size %zuB", + shrink_bytes, G1HeapRegion::GrainBytes); + uint num_regions_to_remove = (uint)(shrink_bytes / G1HeapRegion::GrainBytes); uint num_regions_removed = _hrm.shrink_by(num_regions_to_remove); size_t shrunk_bytes = num_regions_removed * G1HeapRegion::GrainBytes; - log_debug(gc, ergo, heap)("Shrink the heap. requested shrinking amount: %zuB aligned shrinking amount: %zuB actual amount shrunk: %zuB", - shrink_bytes, aligned_shrink_bytes, shrunk_bytes); + log_debug(gc, ergo, heap)("Heap resize. Requested shrinking amount: %zuB actual shrinking amount: %zuB (%u regions)", + shrink_bytes, shrunk_bytes, num_regions_removed); if (num_regions_removed > 0) { - log_debug(gc, heap)("Uncommittable regions after shrink: %u", num_regions_removed); policy()->record_new_heap_size(num_committed_regions()); } else { - log_debug(gc, ergo, heap)("Did not shrink the heap (heap shrinking operation failed)"); + log_debug(gc, ergo, heap)("Heap resize. 
Did not shrink the heap (heap shrinking operation failed)"); } } void G1CollectedHeap::shrink(size_t shrink_bytes) { + if (capacity() == min_capacity()) { + log_debug(gc, ergo, heap)("Heap resize. Did not shrink the heap (heap already at minimum)"); + return; + } + + size_t aligned_shrink_bytes = os::align_down_vm_page_size(shrink_bytes); + aligned_shrink_bytes = align_down(aligned_shrink_bytes, G1HeapRegion::GrainBytes); + + aligned_shrink_bytes = capacity() - MAX2(capacity() - aligned_shrink_bytes, min_capacity()); + assert(is_aligned(aligned_shrink_bytes, G1HeapRegion::GrainBytes), "Bytes to shrink %zuB not aligned", aligned_shrink_bytes); + + log_debug(gc, ergo, heap)("Heap resize. Requested shrink amount: %zuB aligned shrink amount: %zuB", + shrink_bytes, aligned_shrink_bytes); + + if (aligned_shrink_bytes == 0) { + log_debug(gc, ergo, heap)("Heap resize. Did not shrink the heap (shrink request too small)"); + return; + } + _verifier->verify_region_sets_optional(); // We should only reach here at the end of a Full GC or during Remark which @@ -1069,7 +1112,7 @@ void G1CollectedHeap::shrink(size_t shrink_bytes) { // could instead use the remove_all_pending() method on free_list to // remove only the ones that we need to remove. 
_hrm.remove_all_free_regions(); - shrink_helper(shrink_bytes); + shrink_helper(aligned_shrink_bytes); rebuild_region_sets(true /* free_list_only */); _hrm.verify_optional(); @@ -1335,7 +1378,7 @@ jint G1CollectedHeap::initialize() { } os::trace_page_sizes("Heap", - MinHeapSize, + min_capacity(), reserved_byte_size, heap_rs.base(), heap_rs.size(), @@ -2021,7 +2064,7 @@ bool G1CollectedHeap::block_is_obj(const HeapWord* addr) const { } size_t G1CollectedHeap::tlab_capacity(Thread* ignored) const { - return (_policy->young_list_target_length() - _survivor.length()) * G1HeapRegion::GrainBytes; + return eden_target_length() * G1HeapRegion::GrainBytes; } size_t G1CollectedHeap::tlab_used(Thread* ignored) const { @@ -2042,6 +2085,10 @@ size_t G1CollectedHeap::max_capacity() const { return max_num_regions() * G1HeapRegion::GrainBytes; } +size_t G1CollectedHeap::min_capacity() const { + return MinHeapSize; +} + void G1CollectedHeap::prepare_for_verify() { _verifier->prepare_for_verify(); } @@ -2387,24 +2434,11 @@ void G1CollectedHeap::verify_after_young_collection(G1HeapVerifier::G1VerifyType phase_times()->record_verify_after_time_ms((Ticks::now() - start).seconds() * MILLIUNITS); } -void G1CollectedHeap::expand_heap_after_young_collection(){ - size_t expand_bytes = _heap_sizing_policy->young_collection_expansion_amount(); - if (expand_bytes > 0) { - // No need for an ergo logging here, - // expansion_amount() does this when it returns a value > 0. 
- Ticks expand_start = Ticks::now(); - if (expand(expand_bytes, _workers)) { - double expand_ms = (Ticks::now() - expand_start).seconds() * MILLIUNITS; - phase_times()->record_expand_heap_time(expand_ms); - } - } -} - -void G1CollectedHeap::do_collection_pause_at_safepoint() { +void G1CollectedHeap::do_collection_pause_at_safepoint(size_t allocation_word_size) { assert_at_safepoint_on_vm_thread(); guarantee(!is_stw_gc_active(), "collection is not reentrant"); - do_collection_pause_at_safepoint_helper(); + do_collection_pause_at_safepoint_helper(allocation_word_size); } G1HeapPrinterMark::G1HeapPrinterMark(G1CollectedHeap* g1h) : _g1h(g1h), _heap_transition(g1h) { @@ -2468,7 +2502,7 @@ void G1CollectedHeap::flush_region_pin_cache() { } } -void G1CollectedHeap::do_collection_pause_at_safepoint_helper() { +void G1CollectedHeap::do_collection_pause_at_safepoint_helper(size_t allocation_word_size) { ResourceMark rm; IsSTWGCActiveMark active_gc_mark; @@ -2486,7 +2520,7 @@ void G1CollectedHeap::do_collection_pause_at_safepoint_helper() { bool should_start_concurrent_mark_operation = collector_state()->in_concurrent_start_gc(); // Perform the collection. 
- G1YoungCollector collector(gc_cause()); + G1YoungCollector collector(gc_cause(), allocation_word_size); collector.collect(); // It should now be safe to tell the concurrent mark thread to start @@ -2608,6 +2642,13 @@ void G1CollectedHeap::set_young_gen_card_set_stats(const G1MonotonicArenaMemoryS void G1CollectedHeap::record_obj_copy_mem_stats() { size_t total_old_allocated = _old_evac_stats.allocated() + _old_evac_stats.direct_allocated(); + uint total_allocated = _survivor_evac_stats.regions_filled() + _old_evac_stats.regions_filled(); + + log_debug(gc)("Allocated %u survivor %u old percent total %1.2f%% (%u%%)", + _survivor_evac_stats.regions_filled(), _old_evac_stats.regions_filled(), + percent_of(total_allocated, num_committed_regions() - total_allocated), + G1ReservePercent); + policy()->old_gen_alloc_tracker()-> add_allocated_bytes_since_last_gc(total_old_allocated * HeapWordSize); diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp index ad440577f2d86..73838c4594ede 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp @@ -558,7 +558,9 @@ class G1CollectedHeap : public CollectedHeap { void pin_object(JavaThread* thread, oop obj) override; void unpin_object(JavaThread* thread, oop obj) override; - void resize_heap_if_necessary(size_t allocation_word_size); + void resize_heap_after_young_collection(size_t allocation_word_size); + void resize_heap_after_full_collection(size_t allocation_word_size); + void resize_heap(size_t resize_bytes, bool should_expand); // Check if there is memory to uncommit and if so schedule a task to do it. void uncommit_regions_if_necessary(); @@ -743,11 +745,11 @@ class G1CollectedHeap : public CollectedHeap { // followed by a by-policy upgrade to a full collection. 
// precondition: at safepoint on VM thread // precondition: !is_stw_gc_active() - void do_collection_pause_at_safepoint(); + void do_collection_pause_at_safepoint(size_t allocation_word_size = 0); // Helper for do_collection_pause_at_safepoint, containing the guts // of the incremental collection pause, executed by the vm thread. - void do_collection_pause_at_safepoint_helper(); + void do_collection_pause_at_safepoint_helper(size_t allocation_word_size); void verify_before_young_collection(G1HeapVerifier::G1VerifyType type); void verify_after_young_collection(G1HeapVerifier::G1VerifyType type); @@ -764,8 +766,6 @@ class G1CollectedHeap : public CollectedHeap { // Must be called before any decision based on pin counts. void flush_region_pin_cache(); - void expand_heap_after_young_collection(); - // Update object copying statistics. void record_obj_copy_mem_stats(); private: @@ -1022,6 +1022,8 @@ class G1CollectedHeap : public CollectedHeap { void start_concurrent_gc_for_metadata_allocation(GCCause::Cause gc_cause); + bool last_gc_was_periodic() { return _gc_lastcause == GCCause::_g1_periodic_collection; } + void remove_from_old_gen_sets(const uint old_regions_removed, const uint humongous_regions_removed); void prepend_to_freelist(G1FreeRegionList* list); @@ -1190,6 +1192,7 @@ class G1CollectedHeap : public CollectedHeap { // Print the maximum heap capacity. 
size_t max_capacity() const override; + size_t min_capacity() const; Tickspan time_since_last_collection() const { return Ticks::now() - _collection_pause_end; } @@ -1204,6 +1207,7 @@ class G1CollectedHeap : public CollectedHeap { G1SurvivorRegions* survivor() { return &_survivor; } + inline uint eden_target_length() const; uint eden_regions_count() const { return _eden.length(); } uint eden_regions_count(uint node_index) const { return _eden.regions_on_node(node_index); } uint survivor_regions_count() const { return _survivor.length(); } diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp index 943c68b74c77a..553be04d28527 100644 --- a/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp +++ b/src/hotspot/share/gc/g1/g1CollectedHeap.inline.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it @@ -316,4 +316,8 @@ inline bool G1CollectedHeap::is_collection_set_candidate(const G1HeapRegion* r) return candidates->contains(r); } +inline uint G1CollectedHeap::eden_target_length() const { + return _policy->young_list_target_length() - survivor_regions_count(); +} + #endif // SHARE_GC_G1_G1COLLECTEDHEAP_INLINE_HPP diff --git a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp index 25a9b80093c1d..6831ae700d960 100644 --- a/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp +++ b/src/hotspot/share/gc/g1/g1ConcurrentMark.cpp @@ -1461,8 +1461,9 @@ void G1ConcurrentMark::remark() { // GC pause. 
_g1h->increment_total_collections(); - _g1h->resize_heap_if_necessary(size_t(0) /* allocation_word_size */); - _g1h->uncommit_regions_if_necessary(); + if (_g1h->last_gc_was_periodic()) { + _g1h->resize_heap_after_full_collection(size_t(0) /* allocation_word_size */); + } compute_new_sizes(); diff --git a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp index f7b0cb23b65cc..9171c8fd16dfd 100644 --- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp +++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.cpp @@ -178,7 +178,7 @@ void G1GCPhaseTimes::reset() { _cur_pre_evacuate_prepare_time_ms = 0.0; _cur_post_evacuate_cleanup_1_time_ms = 0.0; _cur_post_evacuate_cleanup_2_time_ms = 0.0; - _cur_expand_heap_time_ms = 0.0; + _cur_resize_heap_time_ms = 0.0; _cur_ref_proc_time_ms = 0.0; _cur_collection_start_sec = 0.0; _root_region_scan_wait_time_ms = 0.0; @@ -488,7 +488,7 @@ double G1GCPhaseTimes::print_post_evacuate_collection_set(bool evacuation_failed _cur_post_evacuate_cleanup_2_time_ms + _recorded_total_rebuild_freelist_time_ms + _recorded_prepare_for_mutator_time_ms + - _cur_expand_heap_time_ms; + _cur_resize_heap_time_ms; info_time("Post Evacuate Collection Set", sum_ms); @@ -537,7 +537,7 @@ double G1GCPhaseTimes::print_post_evacuate_collection_set(bool evacuation_failed trace_phase(_gc_par_phases[RebuildFreeList]); debug_time("Prepare For Mutator", _recorded_prepare_for_mutator_time_ms); - debug_time("Expand Heap After Collection", _cur_expand_heap_time_ms); + debug_time("Resize Heap After Collection", _cur_resize_heap_time_ms); return sum_ms; } diff --git a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp index ea868cd939e8c..a92f4f977ffe4 100644 --- a/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp +++ b/src/hotspot/share/gc/g1/g1GCPhaseTimes.hpp @@ -188,7 +188,7 @@ class G1GCPhaseTimes : public CHeapObj { double _cur_post_evacuate_cleanup_1_time_ms; double _cur_post_evacuate_cleanup_2_time_ms; - double 
_cur_expand_heap_time_ms; + double _cur_resize_heap_time_ms; double _cur_ref_proc_time_ms; double _cur_collection_start_sec; @@ -283,8 +283,8 @@ class G1GCPhaseTimes : public CHeapObj { _cur_pre_evacuate_prepare_time_ms = ms; } - void record_expand_heap_time(double ms) { - _cur_expand_heap_time_ms = ms; + void record_resize_heap_time(double ms) { + _cur_resize_heap_time_ms = ms; } void record_initial_evac_time(double ms) { @@ -405,8 +405,8 @@ class G1GCPhaseTimes : public CHeapObj { _cur_collection_nmethod_list_cleanup_time_ms; } - double cur_expand_heap_time_ms() { - return _cur_expand_heap_time_ms; + double cur_resize_heap_time_ms() { + return _cur_resize_heap_time_ms; } double root_region_scan_wait_time_ms() { diff --git a/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp b/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp index 71e2517716668..d7f80ecff9305 100644 --- a/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp +++ b/src/hotspot/share/gc/g1/g1HeapSizingPolicy.cpp @@ -23,7 +23,7 @@ */ #include "gc/g1/g1Analytics.hpp" -#include "gc/g1/g1CollectedHeap.hpp" +#include "gc/g1/g1CollectedHeap.inline.hpp" #include "gc/g1/g1HeapSizingPolicy.hpp" #include "gc/shared/gc_globals.hpp" #include "logging/log.hpp" @@ -35,19 +35,42 @@ G1HeapSizingPolicy* G1HeapSizingPolicy::create(const G1CollectedHeap* g1h, const return new G1HeapSizingPolicy(g1h, analytics); } +uint G1HeapSizingPolicy::long_term_count_limit() const { + return _analytics->max_num_of_recorded_pause_times(); +} + G1HeapSizingPolicy::G1HeapSizingPolicy(const G1CollectedHeap* g1h, const G1Analytics* analytics) : _g1h(g1h), _analytics(analytics), - _num_prev_pauses_for_heuristics(analytics->number_of_recorded_pause_times()) { + // Bias for expansion at startup; the +1 is to counter the first sample always + // being 0.0, i.e. lower than any threshold. 
+ _ratio_exceeds_threshold((MinOverThresholdForExpansion / 2) + 1), + _recent_pause_ratios(long_term_count_limit()), + _long_term_count(0) { + + assert(_ratio_exceeds_threshold < MinOverThresholdForExpansion, + "Initial ratio counter value too high."); + + assert(_ratio_exceeds_threshold > -MinOverThresholdForExpansion, + "Initial ratio counter value too low."); + + assert(MinOverThresholdForExpansion <= long_term_count_limit(), + "Expansion threshold count must be less than %u", long_term_count_limit()); + + assert(G1ShortTermShrinkThreshold <= long_term_count_limit(), + "Shrink threshold count must be less than %u", long_term_count_limit()); +} - assert(MinOverThresholdForGrowth < _num_prev_pauses_for_heuristics, "Threshold must be less than %u", _num_prev_pauses_for_heuristics); - clear_ratio_check_data(); +void G1HeapSizingPolicy::reset_ratio_tracking_data() { + _long_term_count = 0; + _ratio_exceeds_threshold = 0; + // Keep the recent gc time ratio data. } -void G1HeapSizingPolicy::clear_ratio_check_data() { - _ratio_over_threshold_count = 0; - _ratio_over_threshold_sum = 0.0; - _pauses_since_start = 0; +void G1HeapSizingPolicy::decay_ratio_tracking_data() { + _long_term_count = 0; + _ratio_exceeds_threshold /= 2; + // Keep the recent gc time ratio data. } double G1HeapSizingPolicy::scale_with_heap(double pause_time_threshold) { @@ -63,137 +86,260 @@ double G1HeapSizingPolicy::scale_with_heap(double pause_time_threshold) { return threshold; } -static void log_expansion(double short_term_pause_time_ratio, - double long_term_pause_time_ratio, - double threshold, - double pause_time_ratio, - bool fully_expanded, - size_t resize_bytes) { +// Logistic function, returns values in the range [0,1] +static double sigmoid_function(double value) { + // Sigmoid Parameters: + double inflection_point = 1.0; // Inflection point where acceleration begins (midpoint of sigmoid). 
+ double steepness = 6.0; + + return 1.0 / (1.0 + pow(M_E, -steepness * (value - inflection_point))); +} + +double G1HeapSizingPolicy::scale_resize_ratio_delta(double ratio_delta, + double min_scale_down_factor, + double max_scale_up_factor) const { + // We use a sigmoid function for scaling smoothly as we transition from a slow start to a fast growth + // function with increasing ratio_delta. The sigmoid outputs a value in the range [0,1] which we scale to + // the range [min_scale_down_factor, max_scale_up_factor] + double sigmoid = sigmoid_function(ratio_delta); + + double scale_factor = min_scale_down_factor + (max_scale_up_factor - min_scale_down_factor) * sigmoid; + return scale_factor; +} - log_debug(gc, ergo, heap)("Heap expansion: " +// Calculate the ratio of the difference of a and b relative to b. +static double rel_ratio(double a, double b) { + return (a - b) / b; +} + +static void log_resize(double short_term_pause_time_ratio, + double long_term_pause_time_ratio, + double lower_threshold, + double upper_threshold, + double pause_time_ratio, + bool at_limit, + size_t resize_bytes, + bool expand) { + + log_debug(gc, ergo, heap)("Heap resize: " "short term pause time ratio %1.2f%% long term pause time ratio %1.2f%% " - "threshold %1.2f%% pause time ratio %1.2f%% fully expanded %s " - "resize by %zuB", + "lower threshold %1.2f%% upper threshold %1.2f%% pause time ratio %1.2f%% " + "at limit %s resize by %zuB expand %s", short_term_pause_time_ratio * 100.0, long_term_pause_time_ratio * 100.0, - threshold * 100.0, + lower_threshold * 100.0, + upper_threshold * 100.0, pause_time_ratio * 100.0, - BOOL_TO_STR(fully_expanded), - resize_bytes); + BOOL_TO_STR(at_limit), + resize_bytes, + BOOL_TO_STR(expand)); } -size_t G1HeapSizingPolicy::young_collection_expansion_amount() { - assert(GCTimeRatio > 0, "must be"); +size_t G1HeapSizingPolicy::young_collection_expand_amount(double delta) const { + assert(delta >= 0.0, "must be"); - double long_term_pause_time_ratio = 
_analytics->long_term_pause_time_ratio(); - double short_term_pause_time_ratio = _analytics->short_term_pause_time_ratio(); - const double pause_time_threshold = 1.0 / (1.0 + GCTimeRatio); - double threshold = scale_with_heap(pause_time_threshold); + size_t reserved_bytes = _g1h->max_capacity(); + size_t committed_bytes = _g1h->capacity(); + size_t uncommitted_bytes = reserved_bytes - committed_bytes; + size_t expand_bytes_via_pct = + uncommitted_bytes * G1ExpandByPercentOfAvailable / 100; + size_t min_expand_bytes = MIN2(G1HeapRegion::GrainBytes, uncommitted_bytes); - size_t expand_bytes = 0; + // Take the current size, or G1ExpandByPercentOfAvailable % of + // the available expansion space, whichever is smaller, as the base + // expansion size. Then possibly scale this size according to how much the + // threshold has (on average) been exceeded by. + const double MinScaleDownFactor = 0.2; + const double MaxScaleUpFactor = 2.0; - if (_g1h->capacity() == _g1h->max_capacity()) { - log_expansion(short_term_pause_time_ratio, long_term_pause_time_ratio, - threshold, pause_time_threshold, true, 0); - clear_ratio_check_data(); - return expand_bytes; + double scale_factor = scale_resize_ratio_delta(delta, + MinScaleDownFactor, + MaxScaleUpFactor); + + size_t resize_bytes = MIN2(expand_bytes_via_pct, committed_bytes); + + resize_bytes = static_cast(resize_bytes * scale_factor); + + // Ensure the expansion size is at least the minimum growth amount + // and at most the remaining uncommitted byte size. + return clamp((size_t)resize_bytes, min_expand_bytes, uncommitted_bytes); +} + +size_t G1HeapSizingPolicy::young_collection_shrink_amount(double delta, size_t allocation_word_size) const { + assert(delta >= 0.0, "must be"); + + double scale_factor = scale_resize_ratio_delta(delta, + G1ShrinkByPercentOfAvailable / 1000.0, + G1ShrinkByPercentOfAvailable / 100.0); + assert(scale_factor <= 1.0, "must be"); + + // We are at the end of GC, so free regions are at maximum. 
Do not try to shrink + // to have less than the reserve or the number of regions we are most certainly + // going to use during this mutator phase. + uint target_regions_to_shrink = _g1h->num_free_regions(); + + uint reserve_regions = ceil(_g1h->num_committed_regions() * G1ReservePercent / 100.0); + + uint needed_for_allocation = _g1h->eden_target_length(); + if (_g1h->is_humongous(allocation_word_size)) { + needed_for_allocation += (uint) _g1h->humongous_obj_size_in_regions(allocation_word_size); } - // If the last GC time ratio is over the threshold, increment the count of - // times it has been exceeded, and add this ratio to the sum of exceeded - // ratios. - if (short_term_pause_time_ratio > threshold) { - _ratio_over_threshold_count++; - _ratio_over_threshold_sum += short_term_pause_time_ratio; + if (target_regions_to_shrink >= needed_for_allocation) { + target_regions_to_shrink -= needed_for_allocation; + } else { + target_regions_to_shrink = 0; + } + + size_t resize_bytes = (double)G1HeapRegion::GrainBytes * target_regions_to_shrink * scale_factor; + + log_debug(gc, ergo, heap)("Shrink log: scale factor %1.2f%% " + "total free regions %u " + "reserve regions %u " + "needed for alloc %u " + "base targeted for shrinking %u " + "resize_bytes %zd ( %zu regions)", + scale_factor * 100.0, + _g1h->num_free_regions(), + reserve_regions, + needed_for_allocation, + target_regions_to_shrink, + resize_bytes, + (resize_bytes / G1HeapRegion::GrainBytes)); + + return resize_bytes; +} + +size_t G1HeapSizingPolicy::young_collection_resize_amount(bool& expand, size_t allocation_word_size) { + assert(GCTimeRatio > 0, "must be"); + expand = false; + + const double long_term_pause_time_ratio = _analytics->long_term_pause_time_ratio(); + const double short_term_pause_time_ratio = _analytics->short_term_pause_time_ratio(); + + // Calculate gc time ratio thresholds: + // - upper threshold, directly based on GCTimeRatio. We do not want to exceed + // this. 
+ // - lower threshold, we do not want to go under. + // - mid threshold, halfway between upper and lower threshold, represents the + // actual target when resizing the heap. + double pause_time_threshold = 1.0 / (1.0 + GCTimeRatio); + + pause_time_threshold = scale_with_heap(pause_time_threshold); + const double min_gc_time_ratio_ratio = G1MinimumPercentOfGCTimeRatio / 100.0; + double upper_threshold = pause_time_threshold * (1 + min_gc_time_ratio_ratio); + double lower_threshold = pause_time_threshold * (1 - min_gc_time_ratio_ratio); + + // Use threshold based relative to current GCTimeRatio to more quickly expand + // and shrink at smaller heap sizes (relative to maximum). + const double long_term_delta = rel_ratio(long_term_pause_time_ratio, pause_time_threshold); + double short_term_ratio_delta = rel_ratio(short_term_pause_time_ratio, pause_time_threshold); + + // If the short term GC time ratio exceeds a threshold, increment the occurrence + // counter. + if (short_term_pause_time_ratio > upper_threshold) { + _ratio_exceeds_threshold++; + } else if (short_term_pause_time_ratio < lower_threshold) { + _ratio_exceeds_threshold--; + } + // Ignore very first sample as it is garbage. + if (_long_term_count != 0 || _recent_pause_ratios.num() != 0) { + _recent_pause_ratios.add(short_term_ratio_delta); } + _long_term_count++; + + log_trace(gc, ergo, heap)("Heap resize triggers: long term count: %u " + "long term interval: %u " + "delta: %1.2f " + "ratio exceeds threshold count: %d", + _long_term_count, + long_term_count_limit(), + short_term_ratio_delta, + _ratio_exceeds_threshold); + + log_debug(gc, ergo, heap)("Heap triggers: pauses-since-start: %u num-prev-pauses-for-heuristics: %u ratio-exceeds-threshold-count: %d", + _recent_pause_ratios.num(), long_term_count_limit(), _ratio_exceeds_threshold); + + // Check if there is a short- or long-term need for resizing, expansion first. 
+ // + // Short-term resizing need is detected by exceeding the upper or lower thresholds + // multiple times, tracked in _ratio_exceeds_threshold. If it contains a large + // positive or negative (larger than the respective thresholds), we trigger + // resizing calculation. + // + // Slowly occurring long-term changes to the actual gc time ratios are checked + // only every once a while. + // + // The _ratio_exceeds_threshold value is reset after each resize, or slowly + // decayed if nothing happens. + + size_t resize_bytes = 0; + + const bool use_long_term_delta = (_long_term_count == long_term_count_limit()); + const double short_term_delta = _recent_pause_ratios.avg(); + + double delta; + if (use_long_term_delta) { + // For expansion, deltas are positive, and we want to expand aggressively. + // For shrinking, deltas are negative, so the MAX2 below selects the least + // aggressive one as we are using the absolute value for scaling. + delta = MAX2(short_term_delta, long_term_delta); + } else { + delta = short_term_delta; + } + // Delta is negative when shrinking, but the calculation of the resize amount + // always expects an absolute value. Do that here unconditionally. + delta = fabsd(delta); + + int ThresholdForShrink = (int)MIN2(G1ShortTermShrinkThreshold, long_term_count_limit()); - log_trace(gc, ergo, heap)("Heap expansion triggers: pauses since start: %u " - "num prev pauses for heuristics: %u " - "ratio over threshold count: %u", - _pauses_since_start, - _num_prev_pauses_for_heuristics, - _ratio_over_threshold_count); - - // Check if we've had enough GC time ratio checks that were over the - // threshold to trigger an expansion. We'll also expand if we've - // reached the end of the history buffer and the average of all entries - // is still over the threshold. This indicates a smaller number of GCs were - // long enough to make the average exceed the threshold. 
- bool filled_history_buffer = _pauses_since_start == _num_prev_pauses_for_heuristics; - if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) || - (filled_history_buffer && (long_term_pause_time_ratio > threshold))) { - size_t min_expand_bytes = G1HeapRegion::GrainBytes; - size_t reserved_bytes = _g1h->max_capacity(); - size_t committed_bytes = _g1h->capacity(); - size_t uncommitted_bytes = reserved_bytes - committed_bytes; - size_t expand_bytes_via_pct = - uncommitted_bytes * G1ExpandByPercentOfAvailable / 100; - double scale_factor = 1.0; - - // If the current size is less than 1/4 of the Initial heap size, expand - // by half of the delta between the current and Initial sizes. IE, grow - // back quickly. - // - // Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of - // the available expansion space, whichever is smaller, as the base - // expansion size. Then possibly scale this size according to how much the - // threshold has (on average) been exceeded by. If the delta is small - // (less than the StartScaleDownAt value), scale the size down linearly, but - // not by less than MinScaleDownFactor. If the delta is large (greater than - // the StartScaleUpAt value), scale up, but adding no more than MaxScaleUpFactor - // times the base size. The scaling will be linear in the range from - // StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words, - // ScaleUpRange sets the rate of scaling up. 
- if (committed_bytes < InitialHeapSize / 4) {
- expand_bytes = (InitialHeapSize - committed_bytes) / 2;
- } else {
- double const MinScaleDownFactor = 0.2;
- double const MaxScaleUpFactor = 2;
- double const StartScaleDownAt = pause_time_threshold;
- double const StartScaleUpAt = pause_time_threshold * 1.5;
- double const ScaleUpRange = pause_time_threshold * 2.0;
-
- double ratio_delta;
- if (filled_history_buffer) {
- ratio_delta = long_term_pause_time_ratio - threshold;
- } else {
- ratio_delta = (_ratio_over_threshold_sum / _ratio_over_threshold_count) - threshold;
- }
-
- expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
- if (ratio_delta < StartScaleDownAt) {
- scale_factor = ratio_delta / StartScaleDownAt;
- scale_factor = MAX2(scale_factor, MinScaleDownFactor);
- } else if (ratio_delta > StartScaleUpAt) {
- scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange);
- scale_factor = MIN2(scale_factor, MaxScaleUpFactor);
- }
+ if ((_ratio_exceeds_threshold == MinOverThresholdForExpansion) ||
+ (use_long_term_delta && (long_term_pause_time_ratio > upper_threshold))) {
+
+ // Short-cut calculation if already at maximum capacity.
+ if (_g1h->capacity() == _g1h->max_capacity()) {
+ log_resize(short_term_pause_time_ratio, long_term_pause_time_ratio,
+ lower_threshold, upper_threshold, pause_time_threshold, true, 0, expand);
+ reset_ratio_tracking_data();
+ return resize_bytes;
}
- expand_bytes = static_cast<size_t>(expand_bytes * scale_factor);
+ log_trace(gc, ergo, heap)("expand deltas long %1.2f short %1.2f use long term %u delta %1.2f",
+ long_term_delta, short_term_delta, use_long_term_delta, delta);
- // Ensure the expansion size is at least the minimum growth amount
- // and at most the remaining uncommitted byte size.
- expand_bytes = clamp(expand_bytes, min_expand_bytes, uncommitted_bytes);
+ resize_bytes = young_collection_expand_amount(delta);
+ expand = true;
- clear_ratio_check_data();
- } else {
- // An expansion was not triggered. 
If we've started counting, increment
- // the number of checks we've made in the current window. If we've
- // reached the end of the window without resizing, clear the counters to
- // start again the next time we see a ratio above the threshold.
- if (_ratio_over_threshold_count > 0) {
- _pauses_since_start++;
- if (_pauses_since_start > _num_prev_pauses_for_heuristics) {
- clear_ratio_check_data();
- }
+ reset_ratio_tracking_data();
+ } else if ((_ratio_exceeds_threshold == -ThresholdForShrink) ||
+ (use_long_term_delta && (long_term_pause_time_ratio < lower_threshold))) {
+
+ // Short-cut calculation if already at minimum capacity.
+ if (_g1h->capacity() == _g1h->min_capacity()) {
+ log_resize(short_term_pause_time_ratio, long_term_pause_time_ratio,
+ lower_threshold, upper_threshold, pause_time_threshold, true, 0, expand);
+ reset_ratio_tracking_data();
+ return resize_bytes;
}
+
+ log_trace(gc, ergo, heap)("shrink deltas long %1.2f short %1.2f use long term %u delta %1.2f",
+ long_term_delta, short_term_delta, use_long_term_delta, delta);
+
+ resize_bytes = young_collection_shrink_amount(delta, allocation_word_size);
+ expand = false;
+
+ reset_ratio_tracking_data();
+ } else if (use_long_term_delta) {
+ // A resize has not been triggered, but the long term counter overflowed.
+ decay_ratio_tracking_data();
+ expand = true; // Does not matter.
}
- log_expansion(short_term_pause_time_ratio, long_term_pause_time_ratio,
- threshold, pause_time_threshold, false, expand_bytes);
+ log_resize(short_term_pause_time_ratio, long_term_pause_time_ratio,
+ lower_threshold, upper_threshold, pause_time_threshold,
+ false, resize_bytes, expand);
- return expand_bytes;
+ return resize_bytes;
}
static size_t target_heap_capacity(size_t used_bytes, uintx free_ratio) { @@ -257,17 +403,17 @@ size_t G1HeapSizingPolicy::full_collection_resize_amount(bool& expand, size_t al // Should not be greater than the heap max size. 
No need to adjust // it with respect to the heap min size as it's a lower bound (i.e., // we'll try to make the capacity larger than it, not smaller). - minimum_desired_capacity = MIN2(minimum_desired_capacity, MaxHeapSize); + minimum_desired_capacity = MIN2(minimum_desired_capacity, _g1h->max_capacity()); // Should not be less than the heap min size. No need to adjust it // with respect to the heap max size as it's an upper bound (i.e., // we'll try to make the capacity smaller than it, not greater). - maximum_desired_capacity = MAX2(maximum_desired_capacity, MinHeapSize); + maximum_desired_capacity = MAX2(maximum_desired_capacity, _g1h->min_capacity()); // Don't expand unless it's significant; prefer expansion to shrinking. if (capacity_after_gc < minimum_desired_capacity) { size_t expand_bytes = minimum_desired_capacity - capacity_after_gc; - log_debug(gc, ergo, heap)("Attempt heap expansion (capacity lower than min desired capacity). " + log_debug(gc, ergo, heap)("Heap resize. Attempt heap expansion (capacity lower than min desired capacity). " "Capacity: %zuB occupancy: %zuB live: %zuB " "min_desired_capacity: %zuB (%zu %%)", capacity_after_gc, used_after_gc, _g1h->used(), minimum_desired_capacity, MinHeapFreeRatio); @@ -279,7 +425,7 @@ size_t G1HeapSizingPolicy::full_collection_resize_amount(bool& expand, size_t al // Capacity too large, compute shrinking size size_t shrink_bytes = capacity_after_gc - maximum_desired_capacity; - log_debug(gc, ergo, heap)("Attempt heap shrinking (capacity higher than max desired capacity). " + log_debug(gc, ergo, heap)("Heap resize. Attempt heap shrinking (capacity higher than max desired capacity). 
" "Capacity: %zuB occupancy: %zuB live: %zuB " "maximum_desired_capacity: %zuB (%zu %%)", capacity_after_gc, used_after_gc, _g1h->used(), maximum_desired_capacity, MaxHeapFreeRatio); diff --git a/src/hotspot/share/gc/g1/g1HeapSizingPolicy.hpp b/src/hotspot/share/gc/g1/g1HeapSizingPolicy.hpp index 4ee302403ed37..436e44ebd152d 100644 --- a/src/hotspot/share/gc/g1/g1HeapSizingPolicy.hpp +++ b/src/hotspot/share/gc/g1/g1HeapSizingPolicy.hpp @@ -26,35 +26,85 @@ #define SHARE_GC_G1_G1HEAPSIZINGPOLICY_HPP #include "memory/allocation.hpp" +#include "utilities/numberSeq.hpp" class G1Analytics; class G1CollectedHeap; +// +// Contains heuristics to resize the heap, i.e. expand or shrink, during operation. +// +// For young collections, this heuristics is based on gc time ratio, i.e. trying +// to change the heap so that current gc time ratio stays approximately as +// selected by the user. +// +// The heuristics tracks both short and long term GC behavior to effect heap size +// change. +// +// Short term tracking is based on the short-term gc time ratio i.e we count +// events for which short-term gc time ratio is outside the range of +// [GCTimeRatio * (1 - G1MinimumPercentOfGCTimeRatio / 100), GCTimeRatio * (1 + G1MinimumPercentOfGCTimeRatio / 100)] +// If below that range, we decrement that counter, if above, we increment it. +// +// The intent of this mechanism is to filter short term events because heap sizing has +// some overhead. +// +// If that counter reaches the MinOverThresholdForExpansion we consider expansion, +// if that counter reaches -G1ShortTermShrinkThreshold we consider shrinking the heap. +// +// While doing so, we accumulate the relative difference to the midpoint of this range +// (GCTimeRatio) to guide the expansion/shrinking amount. +// +// Further, if there is no short-term based resizing event for a "long" time, we +// decay that counter, i.e. 
drop it towards zero again to avoid that previous +// intermediate length short term behavior followed by a quiet time and a single +// short term event causes unnecessary resizes. +// +// Long term behavior is solely managed by regularly comparing actual long term gc +// time ratio with the boundaries of above range in regular long term intervals. +// If current long term gc time ratio is outside, expand or shrink respectively. +// +// For full collections, we base resize decisions only on Min/MaxHeapFreeRatio. +// class G1HeapSizingPolicy: public CHeapObj<mtGC> { - // MinOverThresholdForGrowth must be less than the number of recorded - // pause times in G1Analytics, representing the minimum number of pause - // time ratios that exceed GCTimeRatio before a heap expansion will be triggered. - const static uint MinOverThresholdForGrowth = 4; + // MinOverThresholdForExpansion defines the number of actual gc time + // ratios over the upper and lower thresholds respectively. + const static int MinOverThresholdForExpansion = 4; const G1CollectedHeap* _g1h; const G1Analytics* _analytics; - const uint _num_prev_pauses_for_heuristics; - // Ratio check data for determining if heap growth is necessary. - uint _ratio_over_threshold_count; - double _ratio_over_threshold_sum; - uint _pauses_since_start; + uint long_term_count_limit() const; + // Number of times short-term gc time ratio crossed the lower or upper threshold + // recently; every time the upper threshold is exceeded, it is incremented, and + // decremented if the lower threshold is exceeded. + int _ratio_exceeds_threshold; + // Recent actual gc time ratios relative to the middle of lower and upper threshold. + TruncatedSeq _recent_pause_ratios; + uint _long_term_count; - // Scale "full" gc pause time threshold with heap size as we want to resize more + // Clear ratio tracking data used by resize_amount(). + void reset_ratio_tracking_data(); + // Decay (move towards "no changes") ratio tracking data. 
+ void decay_ratio_tracking_data(); + + // Scale "full" gc time ratio threshold with heap size as we want to resize more // eagerly at small heap sizes. double scale_with_heap(double pause_time_threshold); + // Scale the ratio delta depending on the relative difference from the target gc time ratio. + double scale_resize_ratio_delta(double ratio_delta, double min_scale_down_factor, double max_scale_up_factor) const; + + size_t young_collection_expand_amount(double delta) const; + size_t young_collection_shrink_amount(double delta, size_t allocation_word_size) const; + G1HeapSizingPolicy(const G1CollectedHeap* g1h, const G1Analytics* analytics); public: - // If an expansion would be appropriate, because recent GC overhead had - // exceeded the desired limit, return an amount to expand by. - size_t young_collection_expansion_amount(); + // Return by how many bytes the heap should be changed based on recent gc time + // ratio after young collection. If expand is set, the heap should be expanded, + // otherwise shrunk. + size_t young_collection_resize_amount(bool& expand, size_t allocation_word_size); // Returns the amount of bytes to resize the heap; if expand is set, the heap // should by expanded by that amount, shrunk otherwise. 
diff --git a/src/hotspot/share/gc/g1/g1YoungCollector.cpp b/src/hotspot/share/gc/g1/g1YoungCollector.cpp index b56d9991acbb6..88aee44c58a87 100644 --- a/src/hotspot/share/gc/g1/g1YoungCollector.cpp +++ b/src/hotspot/share/gc/g1/g1YoungCollector.cpp @@ -1062,7 +1062,7 @@ void G1YoungCollector::post_evacuate_collection_set(G1EvacInfo* evacuation_info, _g1h->gc_epilogue(false); - _g1h->expand_heap_after_young_collection(); + _g1h->resize_heap_after_young_collection(_allocation_word_size); } bool G1YoungCollector::evacuation_failed() const { @@ -1077,9 +1077,11 @@ bool G1YoungCollector::evacuation_alloc_failed() const { return _evac_failure_regions.has_regions_alloc_failed(); } -G1YoungCollector::G1YoungCollector(GCCause::Cause gc_cause) : +G1YoungCollector::G1YoungCollector(GCCause::Cause gc_cause, + size_t allocation_word_size) : _g1h(G1CollectedHeap::heap()), _gc_cause(gc_cause), + _allocation_word_size(allocation_word_size), _concurrent_operation_is_full_mark(false), _evac_failure_regions() { diff --git a/src/hotspot/share/gc/g1/g1YoungCollector.hpp b/src/hotspot/share/gc/g1/g1YoungCollector.hpp index d683538980129..2c4929958fe69 100644 --- a/src/hotspot/share/gc/g1/g1YoungCollector.hpp +++ b/src/hotspot/share/gc/g1/g1YoungCollector.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2021, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2021, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -79,6 +79,7 @@ class G1YoungCollector { G1YoungGCAllocationFailureInjector* allocation_failure_injector() const; GCCause::Cause _gc_cause; + size_t _allocation_word_size; bool _concurrent_operation_is_full_mark; @@ -137,7 +138,8 @@ class G1YoungCollector { bool evacuation_alloc_failed() const; public: - G1YoungCollector(GCCause::Cause gc_cause); + G1YoungCollector(GCCause::Cause gc_cause, + size_t allocation_word_size); void collect(); bool concurrent_operation_is_full_mark() const { return _concurrent_operation_is_full_mark; } diff --git a/src/hotspot/share/gc/g1/g1_globals.hpp b/src/hotspot/share/gc/g1/g1_globals.hpp index 44d0d22257eed..6bb7574756c84 100644 --- a/src/hotspot/share/gc/g1/g1_globals.hpp +++ b/src/hotspot/share/gc/g1/g1_globals.hpp @@ -1,5 +1,5 @@ /* - * Copyright (c) 2001, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2001, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
 * * This code is free software; you can redistribute it and/or modify it @@ -161,6 +161,19 @@ "When expanding, % of uncommitted space to claim.") \ range(0, 100) \ \ + product(uint, G1ShrinkByPercentOfAvailable, 50, EXPERIMENTAL, \ + "When shrinking, maximum % of free space to claim.") \ + range(0, 100) \ + \ + product(uint, G1MinimumPercentOfGCTimeRatio, 25, EXPERIMENTAL, \ + "Percentage of GCTimeRatio G1 will try to avoid going below.") \ + range(0, 100) \ + \ + product(uint, G1ShortTermShrinkThreshold, 8, EXPERIMENTAL, \ + "Number of consecutive GCs with the short term gc time ratio " \ + "below the threshold before we attempt to shrink.") \ + range(0, 10) \ + \ product(size_t, G1UpdateBufferSize, 256, \ "Size of an update buffer") \ constraint(G1UpdateBufferSizeConstraintFunc, AtParse) \ diff --git a/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java b/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java index e30cd3c52069d..17ae437358df1 100644 --- a/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java +++ b/test/hotspot/jtreg/gc/g1/TestGCLogMessages.java @@ -1,5 +1,5 @@ /* - * Copyright (c) 2014, 2024, Oracle and/or its affiliates. All rights reserved. + * Copyright (c) 2014, 2025, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 
* * This code is free software; you can redistribute it and/or modify it @@ -38,8 +38,8 @@ * gc.g1.TestGCLogMessages */ -import jdk.test.lib.process.OutputAnalyzer; import jdk.test.lib.Platform; +import jdk.test.lib.process.OutputAnalyzer; import jdk.test.lib.process.ProcessTools; import jdk.test.whitebox.code.Compiler; @@ -194,7 +194,7 @@ public boolean isAvailable() { new LogMessageWithLevel("Serial Rebuild Free List:", Level.TRACE), new LogMessageWithLevel("Parallel Rebuild Free List \\(ms\\):", Level.TRACE), new LogMessageWithLevel("Prepare For Mutator:", Level.DEBUG), - new LogMessageWithLevel("Expand Heap After Collection:", Level.DEBUG), + new LogMessageWithLevel("Resize Heap After Collection:", Level.DEBUG), }; void checkMessagesAtLevel(OutputAnalyzer output, LogMessageWithLevel messages[], Level level) throws Exception { @@ -320,8 +320,7 @@ private void testExpandHeap() throws Exception { "-XX:+WhiteBoxAPI", GCTest.class.getName()); - output.shouldContain("Expand the heap. requested expansion amount: "); - output.shouldContain("B expansion amount: "); + output.shouldContain("Heap resize: "); output.shouldHaveExitValue(0); }