diff --git a/docs/userguide/src/tutorial/code/mygc_semispace/global.rs b/docs/userguide/src/tutorial/code/mygc_semispace/global.rs index a7e6fe7485..7ebcf1b37b 100644 --- a/docs/userguide/src/tutorial/code/mygc_semispace/global.rs +++ b/docs/userguide/src/tutorial/code/mygc_semispace/global.rs @@ -2,8 +2,8 @@ use crate::plan::global::BasePlan; //Modify use crate::plan::global::CommonPlan; // Add use crate::plan::global::{CreateGeneralPlanArgs, CreateSpecificPlanArgs}; -use crate::plan::mygc::mutator::ALLOCATOR_MAPPING; use crate::plan::mygc::gc_work::MyGCWorkContext; +use crate::plan::mygc::mutator::ALLOCATOR_MAPPING; use crate::plan::AllocationSemantics; use crate::plan::Plan; use crate::plan::PlanConstraints; @@ -12,13 +12,13 @@ use crate::policy::space::Space; use crate::scheduler::*; // Modify use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::*; -use crate::util::heap::VMRequest; +use crate::util::heap::heap_meta::VMRequest; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::opaque_pointer::*; use crate::vm::VMBinding; use enum_map::EnumMap; use std::sync::atomic::{AtomicBool, Ordering}; // Add -// ANCHOR_END: imports_no_gc_work + // ANCHOR_END: imports_no_gc_work // Remove #[allow(unused_imports)]. // Remove handle_user_collection_request(). @@ -66,7 +66,7 @@ impl Plan for MyGC { }, space_mapping: vec![ // The tospace argument doesn't matter, we will rebind before a GC anyway. - (CopySelector::CopySpace(0), &self.copyspace0) + (CopySelector::CopySpace(0), &self.copyspace0), ], constraints: &MYGC_CONSTRAINTS, } @@ -167,13 +167,35 @@ impl MyGC { global_side_metadata_specs: SideMetadataContext::new_global_specs(&[]), }; + // ANCHOR: specify_spaces + let copyspace0_resp = plan_args + .global_args + .heap + .specify_space(VMRequest::Unrestricted); + let copyspace1_resp = plan_args + .global_args + .heap + .specify_space(VMRequest::Unrestricted); + // ANCHOR_END: specify_spaces + + // ANCHOR: create_common_plan + // Spaces will eventually be placed by `BasePlan`. + let common = CommonPlan::new(&mut plan_args); + // ANCHOR_END: create_common_plan + let res = MyGC { hi: AtomicBool::new(false), - // ANCHOR: copyspace_new - copyspace0: CopySpace::new(plan_args.get_space_args("copyspace0", true, VMRequest::discontiguous()), false), - // ANCHOR_END: copyspace_new - copyspace1: CopySpace::new(plan_args.get_space_args("copyspace1", true, VMRequest::discontiguous()), true), - common: CommonPlan::new(plan_args), + // ANCHOR: copyspaces_new + copyspace0: CopySpace::new( + plan_args.get_space_args("copyspace0", true, copyspace0_resp.unwrap()), + false, + ), + copyspace1: CopySpace::new( + plan_args.get_space_args("copyspace1", true, copyspace1_resp.unwrap()), + true, + ), + // ANCHOR_END: copyspaces_new + common, }; res.verify_side_metadata_sanity(); diff --git a/docs/userguide/src/tutorial/mygc/ss/alloc.md b/docs/userguide/src/tutorial/mygc/ss/alloc.md index 1da743a41a..c27635854b 100644 --- a/docs/userguide/src/tutorial/mygc/ss/alloc.md +++ b/docs/userguide/src/tutorial/mygc/ss/alloc.md @@ -82,24 +82,49 @@ use mmtk_macros::{HasSpaces, PlanTraceObject}; #### Constructor -Change `fn new()`. This section initialises and prepares the objects in MyGC +Change `fn new()`. This section initialises and prepares the objects in MyGC that you just defined. - 1. Delete the definition of `mygc_space`. - Instead, we will define the two copyspaces here. - 2. 
Define one of the copyspaces by adding the following code: ```rust -{{#include ../../code/mygc_semispace/global.rs:copyspace_new}} +{{#include ../../code/mygc_semispace/global.rs:plan_new}} ``` - 3. Create another copyspace, called `copyspace1`, defining it as a fromspace - instead of a tospace. (Hint: the definitions for - copyspaces are in `src/policy/copyspace.rs`.) - 4. Finally, replace the old MyGC initializer. +We now walk through the steps. + +First, we use the `HeapMeta` object provided in the arguments to specify all +spaces that will be created in the current plan. + ```rust -{{#include ../../code/mygc_semispace/global.rs:plan_new}} +{{#include ../../code/mygc_semispace/global.rs:specify_spaces}} ``` +We do not have special requirements for either of the copyspaces, so we just +specify `VMRequest::Unrestricted` here. At this point, the return values +`copyspace0_resp` and `copyspace1_resp` are not yet usable. + +Then, we construct the parent struct by calling `CommonPlan::new()`. + +```rust +{{#include ../../code/mygc_semispace/global.rs:create_common_plan}} +``` + +`CommonPlan::new()` will call `BasePlan::new()` which, in turn, will call +`HeapMeta::place_spaces()`. That will determine the address ranges of all +spaces we specified. + +After this, we can call `copyspace0_resp.unwrap()` to retrieve the computed +placement information for creating `copyspace0`. `copyspace1` is similar. We +can now create the two `CopySpace` instances. + +```rust +{{#include ../../code/mygc_semispace/global.rs:copyspaces_new}} +``` + +Note that `CommonPlan` and `BasePlan` also define other spaces, such as the +large object space. Their constructors specify their spaces before +determining their address ranges and instantiating them, just as we +described here. + ### Access MyGC spaces Add a new section of methods for MyGC: diff --git a/src/mmtk.rs b/src/mmtk.rs index 12f3e77cba..0c27435455 100644 --- a/src/mmtk.rs +++ b/src/mmtk.rs @@ -12,15 +12,15 @@ use crate::util::analysis::AnalysisManager; use crate::util::edge_logger::EdgeLogger; use crate::util::finalizable_processor::FinalizableProcessor; use crate::util::heap::gc_trigger::GCTrigger; +use crate::util::heap::heap_meta::HeapMeta; use crate::util::heap::layout::vm_layout::VMLayout; use crate::util::heap::layout::{self, Mmapper, VMMap}; -use crate::util::heap::HeapMeta; -use crate::util::opaque_pointer::*; use crate::util::options::Options; use crate::util::reference_processor::ReferenceProcessors; #[cfg(feature = "sanity")] use crate::util::sanity::sanity_checker::SanityChecker; use crate::util::statistics::stats::Stats; +use crate::util::{opaque_pointer::*, Address}; use crate::vm::ReferenceGlue; use crate::vm::VMBinding; use std::cell::UnsafeCell; @@ -188,8 +188,14 @@ impl MMTK { // TODO: This probably does not work if we have multiple MMTk instances. VM_MAP.boot(); - // This needs to be called after we create Plan. It needs to use HeapMeta, which is gradually built when we create spaces. - VM_MAP.finalize_static_space_map(heap.get_discontig_start(), heap.get_discontig_end()); + + // `Map32` uses `finalize_static_space_map` to initialize the global freelists, which is reasonable. + // `Map64` uses `finalize_static_space_map` to fix the starting addresses of `RawMemoryFreeList` instances, which is a bug and should be fixed. + // Since `Map64` doesn't read the start and end of the discontiguous range in the function at all, we can leave them as zeroes.
+ let discontig_range = heap + .get_discontiguous_range() + .unwrap_or(Address::ZERO..Address::ZERO); + VM_MAP.finalize_static_space_map(discontig_range.start, discontig_range.end); if *options.transparent_hugepages { MMAPPER.set_mmap_strategy(crate::util::memory::MmapStrategy::TransparentHugePages); diff --git a/src/plan/generational/copying/global.rs b/src/plan/generational/copying/global.rs index c0cb53bfde..84f16a3d70 100644 --- a/src/plan/generational/copying/global.rs +++ b/src/plan/generational/copying/global.rs @@ -16,7 +16,7 @@ use crate::policy::space::Space; use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::*; -use crate::util::heap::VMRequest; +use crate::util::heap::heap_meta::VMRequest; use crate::util::Address; use crate::util::ObjectReference; use crate::util::VMWorkerThread; @@ -200,17 +200,29 @@ impl GenCopy { crate::plan::generational::new_generational_global_metadata_specs::(), }; + let copyspace0_resp = plan_args + .global_args + .heap + .specify_space(VMRequest::Unrestricted); + let copyspace1_resp = plan_args + .global_args + .heap + .specify_space(VMRequest::Unrestricted); + + // Spaces will eventually be placed by `BasePlan`. + let gen = CommonGenPlan::new(&mut plan_args); + let copyspace0 = CopySpace::new( - plan_args.get_space_args("copyspace0", true, VMRequest::discontiguous()), + plan_args.get_space_args("copyspace0", true, copyspace0_resp.unwrap()), false, ); let copyspace1 = CopySpace::new( - plan_args.get_space_args("copyspace1", true, VMRequest::discontiguous()), + plan_args.get_space_args("copyspace1", true, copyspace1_resp.unwrap()), true, ); let res = GenCopy { - gen: CommonGenPlan::new(plan_args), + gen, hi: AtomicBool::new(false), copyspace0, copyspace1, diff --git a/src/plan/generational/global.rs b/src/plan/generational/global.rs index e12430f576..f827a7f24b 100644 --- a/src/plan/generational/global.rs +++ b/src/plan/generational/global.rs @@ -6,7 +6,7 @@ use crate::policy::copyspace::CopySpace; use crate::policy::space::Space; use crate::scheduler::*; use crate::util::copy::CopySemantics; -use crate::util::heap::VMRequest; +use crate::util::heap::heap_meta::VMRequest; use crate::util::statistics::counter::EventCounter; use crate::util::Address; use crate::util::ObjectReference; @@ -37,20 +37,24 @@ pub struct CommonGenPlan { } impl CommonGenPlan { - pub fn new(mut args: CreateSpecificPlanArgs) -> Self { + pub fn new(args: &mut CreateSpecificPlanArgs) -> Self { + let nursery_resp = args.global_args.heap.specify_space(VMRequest::Extent { + extent: args.global_args.options.get_max_nursery_bytes(), + top: false, + }); + + // Spaces will eventually be placed by `BasePlan`. 
+ let common = CommonPlan::new(args); + let nursery = CopySpace::new( - args.get_space_args( - "nursery", - true, - VMRequest::fixed_extent(args.global_args.options.get_max_nursery_bytes(), false), - ), + args.get_space_args("nursery", true, nursery_resp.unwrap()), true, ); + let full_heap_gc_count = args .global_args .stats .new_event_counter("majorGC", true, true); - let common = CommonPlan::new(args); CommonGenPlan { nursery, diff --git a/src/plan/generational/immix/global.rs b/src/plan/generational/immix/global.rs index f1e54cf058..4a7831a7da 100644 --- a/src/plan/generational/immix/global.rs +++ b/src/plan/generational/immix/global.rs @@ -17,7 +17,7 @@ use crate::scheduler::GCWorkScheduler; use crate::scheduler::GCWorker; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::*; -use crate::util::heap::VMRequest; +use crate::util::heap::heap_meta::VMRequest; use crate::util::Address; use crate::util::ObjectReference; use crate::util::VMWorkerThread; @@ -231,8 +231,17 @@ impl GenImmix { global_side_metadata_specs: crate::plan::generational::new_generational_global_metadata_specs::(), }; + + let immix_space_resp = plan_args + .global_args + .heap + .specify_space(VMRequest::Unrestricted); + + // Spaces will eventually be placed by `BasePlan`. + let gen = CommonGenPlan::new(&mut plan_args); + let immix_space = ImmixSpace::new( - plan_args.get_space_args("immix_mature", true, VMRequest::discontiguous()), + plan_args.get_space_args("immix_mature", true, immix_space_resp.unwrap()), ImmixSpaceArgs { reset_log_bit_in_major_gc: false, // We don't need to unlog objects at tracing. Instead, we unlog objects at copying. @@ -244,7 +253,7 @@ impl GenImmix { ); let genimmix = GenImmix { - gen: CommonGenPlan::new(plan_args), + gen, immix_space, last_gc_was_defrag: AtomicBool::new(false), last_gc_was_full_heap: AtomicBool::new(false), diff --git a/src/plan/global.rs b/src/plan/global.rs index d99017a30f..97c2ff9df3 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -14,10 +14,9 @@ use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::{CopyConfig, GCWorkerCopyContext}; use crate::util::heap::gc_trigger::GCTrigger; +use crate::util::heap::heap_meta::{HeapMeta, VMResponse, VMRequest}; use crate::util::heap::layout::Mmapper; use crate::util::heap::layout::VMMap; -use crate::util::heap::HeapMeta; -use crate::util::heap::VMRequest; use crate::util::metadata::side_metadata::SideMetadataSanity; use crate::util::metadata::side_metadata::SideMetadataSpec; use crate::util::options::Options; @@ -366,16 +365,15 @@ impl<'a, VM: VMBinding> CreateSpecificPlanArgs<'a, VM> { &mut self, name: &'static str, zeroed: bool, - vmrequest: VMRequest, + space_meta: VMResponse, ) -> PlanCreateSpaceArgs { PlanCreateSpaceArgs { name, zeroed, - vmrequest, + space_meta, global_side_metadata_specs: self.global_side_metadata_specs.clone(), vm_map: self.global_args.vm_map, mmapper: self.global_args.mmapper, - heap: self.global_args.heap, constraints: self.constraints, gc_trigger: self.global_args.gc_trigger.clone(), scheduler: self.global_args.scheduler.clone(), @@ -387,36 +385,47 @@ impl<'a, VM: VMBinding> CreateSpecificPlanArgs<'a, VM> { impl BasePlan { #[allow(unused_mut)] // 'args' only needs to be mutable for certain features - pub fn new(mut args: CreateSpecificPlanArgs) -> BasePlan { + pub fn new(args: &mut CreateSpecificPlanArgs) -> BasePlan { + #[cfg(feature = "code_space")] + let code_space_resp = 
args.global_args.heap.specify_space(VMRequest::Unrestricted); + #[cfg(feature = "code_space")] + let code_lo_space_resp = args.global_args.heap.specify_space(VMRequest::Unrestricted); + #[cfg(feature = "ro_space")] + let ro_space_resp = args.global_args.heap.specify_space(VMRequest::Unrestricted); + // NOTE: We don't specify VM space because it doesn't use any information in `VMResponse`. + + // BasePlan does not have any nested structs with spaces. We now place spaces. + args.global_args.heap.place_spaces(); + BasePlan { #[cfg(feature = "code_space")] code_space: ImmortalSpace::new(args.get_space_args( "code_space", true, - VMRequest::discontiguous(), + code_space_resp.unwrap(), )), #[cfg(feature = "code_space")] code_lo_space: ImmortalSpace::new(args.get_space_args( "code_lo_space", true, - VMRequest::discontiguous(), + code_lo_space_resp.unwrap(), )), #[cfg(feature = "ro_space")] ro_space: ImmortalSpace::new(args.get_space_args( "ro_space", true, - VMRequest::discontiguous(), + ro_space_resp.unwrap(), )), #[cfg(feature = "vm_space")] vm_space: VMSpace::new(args.get_space_args( "vm_space", false, - VMRequest::discontiguous(), + VMResponse::vm_space_dummy(), )), global_state: args.global_args.state.clone(), - gc_trigger: args.global_args.gc_trigger, - options: args.global_args.options, + gc_trigger: args.global_args.gc_trigger.clone(), + options: args.global_args.options.clone(), } } @@ -550,23 +559,26 @@ pub struct CommonPlan { } impl CommonPlan { - pub fn new(mut args: CreateSpecificPlanArgs) -> CommonPlan { + pub fn new(args: &mut CreateSpecificPlanArgs) -> CommonPlan { + let immortal_resp = args.global_args.heap.specify_space(VMRequest::Unrestricted); + let los_resp = args.global_args.heap.specify_space(VMRequest::Unrestricted); + let nonmoving_resp = args.global_args.heap.specify_space(VMRequest::Unrestricted); + + let base = BasePlan::new(args); + CommonPlan { immortal: ImmortalSpace::new(args.get_space_args( "immortal", true, - VMRequest::discontiguous(), + immortal_resp.unwrap(), )), - los: LargeObjectSpace::new( - args.get_space_args("los", true, VMRequest::discontiguous()), - false, - ), + los: LargeObjectSpace::new(args.get_space_args("los", true, los_resp.unwrap()), false), nonmoving: ImmortalSpace::new(args.get_space_args( "nonmoving", true, - VMRequest::discontiguous(), + nonmoving_resp.unwrap(), )), - base: BasePlan::new(args), + base, } } diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs index 3b1d6dfbd2..5eca301f8a 100644 --- a/src/plan/immix/global.rs +++ b/src/plan/immix/global.rs @@ -13,7 +13,7 @@ use crate::policy::space::Space; use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::*; -use crate::util::heap::VMRequest; +use crate::util::heap::heap_meta::VMRequest; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::vm::VMBinding; use crate::{policy::immix::ImmixSpace, util::opaque_pointer::VMWorkerThread}; @@ -140,12 +140,20 @@ impl Immix { mut plan_args: CreateSpecificPlanArgs, space_args: ImmixSpaceArgs, ) -> Self { + let immix_space_resp = plan_args + .global_args + .heap + .specify_space(VMRequest::Unrestricted); + + // Spaces will eventually be placed by `BasePlan`. 
+ let common = CommonPlan::new(&mut plan_args); + let immix = Immix { immix_space: ImmixSpace::new( - plan_args.get_space_args("immix", true, VMRequest::discontiguous()), + plan_args.get_space_args("immix", true, immix_space_resp.unwrap()), space_args, ), - common: CommonPlan::new(plan_args), + common, last_gc_was_defrag: AtomicBool::new(false), }; diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index cd01b86df5..dbf08bf449 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -15,7 +15,7 @@ use crate::scheduler::gc_work::*; use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::CopySemantics; -use crate::util::heap::VMRequest; +use crate::util::heap::heap_meta::VMRequest; use crate::util::metadata::side_metadata::SideMetadataContext; #[cfg(not(feature = "vo_bit"))] use crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC; @@ -192,13 +192,18 @@ impl MarkCompact { global_side_metadata_specs, }; + let mc_space_resp = plan_args + .global_args + .heap + .specify_space(VMRequest::Unrestricted); + + // Spaces will eventually be placed by `BasePlan`. + let common = CommonPlan::new(&mut plan_args); + let mc_space = - MarkCompactSpace::new(plan_args.get_space_args("mc", true, VMRequest::discontiguous())); + MarkCompactSpace::new(plan_args.get_space_args("mc", true, mc_space_resp.unwrap())); - let res = MarkCompact { - mc_space, - common: CommonPlan::new(plan_args), - }; + let res = MarkCompact { mc_space, common }; res.verify_side_metadata_sanity(); diff --git a/src/plan/marksweep/global.rs b/src/plan/marksweep/global.rs index 5e127d9d8c..54142b0dbb 100644 --- a/src/plan/marksweep/global.rs +++ b/src/plan/marksweep/global.rs @@ -10,7 +10,7 @@ use crate::plan::PlanConstraints; use crate::policy::space::Space; use crate::scheduler::GCWorkScheduler; use crate::util::alloc::allocators::AllocatorSelector; -use crate::util::heap::VMRequest; +use crate::util::heap::heap_meta::VMRequest; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::VMWorkerThread; use crate::vm::VMBinding; @@ -102,13 +102,17 @@ impl MarkSweep { global_side_metadata_specs, }; + let ms_resp = plan_args + .global_args + .heap + .specify_space(VMRequest::Unrestricted); + + // Spaces will eventually be placed by `BasePlan`. 
+ let common = CommonPlan::new(&mut plan_args); + let res = MarkSweep { - ms: MarkSweepSpace::new(plan_args.get_space_args( - "ms", - true, - VMRequest::discontiguous(), - )), - common: CommonPlan::new(plan_args), + ms: MarkSweepSpace::new(plan_args.get_space_args("ms", true, ms_resp.unwrap())), + common, }; res.verify_side_metadata_sanity(); diff --git a/src/plan/nogc/global.rs b/src/plan/nogc/global.rs index 8e013135ed..4629ced8f1 100644 --- a/src/plan/nogc/global.rs +++ b/src/plan/nogc/global.rs @@ -9,8 +9,7 @@ use crate::policy::immortalspace::ImmortalSpace; use crate::policy::space::Space; use crate::scheduler::GCWorkScheduler; use crate::util::alloc::allocators::AllocatorSelector; -#[allow(unused_imports)] -use crate::util::heap::VMRequest; +use crate::util::heap::heap_meta::VMRequest; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::opaque_pointer::*; use crate::vm::VMBinding; @@ -88,23 +87,35 @@ impl NoGC { global_side_metadata_specs: SideMetadataContext::new_global_specs(&[]), }; + let nogc_space_resp = plan_args + .global_args + .heap + .specify_space(VMRequest::Unrestricted); + let immortal_resp = plan_args + .global_args + .heap + .specify_space(VMRequest::Unrestricted); + let los_resp = plan_args + .global_args + .heap + .specify_space(VMRequest::Unrestricted); + + // Spaces will eventually be placed by `BasePlan`. + let base = BasePlan::new(&mut plan_args); + let res = NoGC { nogc_space: NoGCImmortalSpace::new(plan_args.get_space_args( "nogc_space", cfg!(not(feature = "nogc_no_zeroing")), - VMRequest::discontiguous(), + nogc_space_resp.unwrap(), )), immortal: ImmortalSpace::new(plan_args.get_space_args( "immortal", true, - VMRequest::discontiguous(), - )), - los: ImmortalSpace::new(plan_args.get_space_args( - "los", - true, - VMRequest::discontiguous(), + immortal_resp.unwrap(), )), - base: BasePlan::new(plan_args), + los: ImmortalSpace::new(plan_args.get_space_args("los", true, los_resp.unwrap())), + base, }; res.verify_side_metadata_sanity(); diff --git a/src/plan/pageprotect/global.rs b/src/plan/pageprotect/global.rs index 44e25e2202..bd51dcde70 100644 --- a/src/plan/pageprotect/global.rs +++ b/src/plan/pageprotect/global.rs @@ -8,7 +8,7 @@ use crate::plan::PlanConstraints; use crate::policy::space::Space; use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; -use crate::util::heap::VMRequest; +use crate::util::heap::heap_meta::VMRequest; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::{plan::global::BasePlan, vm::VMBinding}; use crate::{ @@ -97,12 +97,20 @@ impl PageProtect { global_side_metadata_specs: SideMetadataContext::new_global_specs(&[]), }; + let space_resp = plan_args + .global_args + .heap + .specify_space(VMRequest::Unrestricted); + + // Spaces will eventually be placed by `BasePlan`. 
+ let common = CommonPlan::new(&mut plan_args); + let ret = PageProtect { space: LargeObjectSpace::new( - plan_args.get_space_args("pageprotect", true, VMRequest::discontiguous()), + plan_args.get_space_args("pageprotect", true, space_resp.unwrap()), true, ), - common: CommonPlan::new(plan_args), + common, }; ret.verify_side_metadata_sanity(); diff --git a/src/plan/semispace/global.rs b/src/plan/semispace/global.rs index eaa7147c78..9894dc227d 100644 --- a/src/plan/semispace/global.rs +++ b/src/plan/semispace/global.rs @@ -11,7 +11,7 @@ use crate::policy::space::Space; use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::*; -use crate::util::heap::VMRequest; +use crate::util::heap::heap_meta::VMRequest; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::opaque_pointer::VMWorkerThread; use crate::{plan::global::BasePlan, vm::VMBinding}; @@ -137,17 +137,29 @@ impl SemiSpace { global_side_metadata_specs: SideMetadataContext::new_global_specs(&[]), }; + let copyspace0_resp = plan_args + .global_args + .heap + .specify_space(VMRequest::Unrestricted); + let copyspace1_resp = plan_args + .global_args + .heap + .specify_space(VMRequest::Unrestricted); + + // Spaces will eventually be placed by `BasePlan`. + let common = CommonPlan::new(&mut plan_args); + let res = SemiSpace { hi: AtomicBool::new(false), copyspace0: CopySpace::new( - plan_args.get_space_args("copyspace0", true, VMRequest::discontiguous()), + plan_args.get_space_args("copyspace0", true, copyspace0_resp.unwrap()), false, ), copyspace1: CopySpace::new( - plan_args.get_space_args("copyspace1", true, VMRequest::discontiguous()), + plan_args.get_space_args("copyspace1", true, copyspace1_resp.unwrap()), true, ), - common: CommonPlan::new(plan_args), + common, }; res.verify_side_metadata_sanity(); diff --git a/src/policy/copyspace.rs b/src/policy/copyspace.rs index 8d08ec1507..5ff5c0ed74 100644 --- a/src/policy/copyspace.rs +++ b/src/policy/copyspace.rs @@ -144,7 +144,7 @@ impl crate::policy::gc_work::PolicyTraceObject for CopySpace< impl CopySpace { pub fn new(args: crate::policy::space::PlanCreateSpaceArgs, from_space: bool) -> Self { let vm_map = args.vm_map; - let is_discontiguous = args.vmrequest.is_discontiguous(); + let is_discontiguous = !args.space_meta.contiguous; let common = CommonSpace::new(args.into_policy_args( true, false, diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index 3809f7bd24..bce10bb6d9 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -288,9 +288,10 @@ impl ImmixSpace { let common = CommonSpace::new(args.into_policy_args(true, false, Self::side_metadata_specs())); ImmixSpace { - pr: if common.vmrequest.is_discontiguous() { + pr: if !common.space_meta.contiguous { BlockPageResource::new_discontiguous( Block::LOG_PAGES, + common.start, vm_map, scheduler.num_workers(), ) diff --git a/src/policy/immortalspace.rs b/src/policy/immortalspace.rs index 5eeebd58c9..1894ebbd9b 100644 --- a/src/policy/immortalspace.rs +++ b/src/policy/immortalspace.rs @@ -121,7 +121,7 @@ impl crate::policy::gc_work::PolicyTraceObject for ImmortalSp impl ImmortalSpace { pub fn new(args: crate::policy::space::PlanCreateSpaceArgs) -> Self { let vm_map = args.vm_map; - let is_discontiguous = args.vmrequest.is_discontiguous(); + let is_discontiguous = !args.space_meta.contiguous; let common = CommonSpace::new(args.into_policy_args( false, true, @@ -145,7 +145,7 @@ impl ImmortalSpace { start: Address, 
size: usize, ) -> Self { - assert!(!args.vmrequest.is_discontiguous()); + assert!(args.space_meta.contiguous); ImmortalSpace { mark_state: MarkState::new(), pr: MonotonePageResource::new_contiguous(start, size, args.vm_map), diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index ec6b2f7506..150713cc52 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -143,7 +143,7 @@ impl LargeObjectSpace { args: crate::policy::space::PlanCreateSpaceArgs, protect_memory_on_release: bool, ) -> Self { - let is_discontiguous = args.vmrequest.is_discontiguous(); + let is_discontiguous = !args.space_meta.contiguous; let vm_map = args.vm_map; let common = CommonSpace::new(args.into_policy_args( false, @@ -151,7 +151,7 @@ impl LargeObjectSpace { metadata::extract_side_metadata(&[*VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC]), )); let mut pr = if is_discontiguous { - FreeListPageResource::new_discontiguous(vm_map) + FreeListPageResource::new_discontiguous(common.start, vm_map) } else { FreeListPageResource::new_contiguous(common.start, common.extent, vm_map) }; diff --git a/src/policy/markcompactspace.rs b/src/policy/markcompactspace.rs index 693218b492..352fc80f7a 100644 --- a/src/policy/markcompactspace.rs +++ b/src/policy/markcompactspace.rs @@ -201,7 +201,7 @@ impl MarkCompactSpace { pub fn new(args: crate::policy::space::PlanCreateSpaceArgs) -> Self { let vm_map = args.vm_map; - let is_discontiguous = args.vmrequest.is_discontiguous(); + let is_discontiguous = !args.space_meta.contiguous; let local_specs = extract_side_metadata(&[*VM::VMObjectModel::LOCAL_MARK_BIT_SPEC]); let common = CommonSpace::new(args.into_policy_args(true, false, local_specs)); MarkCompactSpace { diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs index 8d8eae7d0e..f35c22e040 100644 --- a/src/policy/marksweepspace/native_ms/global.rs +++ b/src/policy/marksweepspace/native_ms/global.rs @@ -200,7 +200,7 @@ impl MarkSweepSpace { pub fn new(args: crate::policy::space::PlanCreateSpaceArgs) -> MarkSweepSpace { let scheduler = args.scheduler.clone(); let vm_map = args.vm_map; - let is_discontiguous = args.vmrequest.is_discontiguous(); + let is_discontiguous = !args.space_meta.contiguous; let local_specs = { metadata::extract_side_metadata(&vec![ MetadataSpec::OnSide(Block::NEXT_BLOCK_TABLE), @@ -221,7 +221,7 @@ impl MarkSweepSpace { let common = CommonSpace::new(args.into_policy_args(false, false, local_specs)); MarkSweepSpace { pr: if is_discontiguous { - FreeListPageResource::new_discontiguous(vm_map) + FreeListPageResource::new_discontiguous(common.start, vm_map) } else { FreeListPageResource::new_contiguous(common.start, common.extent, vm_map) }, diff --git a/src/policy/space.rs b/src/policy/space.rs index 56ae00f8af..efb5d0aa8e 100644 --- a/src/policy/space.rs +++ b/src/policy/space.rs @@ -2,18 +2,19 @@ use crate::global_state::GlobalState; use crate::plan::PlanConstraints; use crate::scheduler::GCWorkScheduler; use crate::util::conversions::*; +use crate::util::heap::heap_meta::VMResponse; use crate::util::metadata::side_metadata::{ SideMetadataContext, SideMetadataSanity, SideMetadataSpec, }; use crate::util::Address; use crate::util::ObjectReference; -use crate::util::heap::layout::vm_layout::{vm_layout, LOG_BYTES_IN_CHUNK}; -use crate::util::heap::{PageResource, VMRequest}; +use crate::util::heap::layout::vm_layout::LOG_BYTES_IN_CHUNK; +use crate::util::heap::PageResource; use crate::util::options::Options; use 
crate::vm::{ActivePlan, Collection}; -use crate::util::constants::{LOG_BYTES_IN_MBYTE, LOG_BYTES_IN_PAGE}; +use crate::util::constants::LOG_BYTES_IN_PAGE; use crate::util::conversions; use crate::util::opaque_pointer::*; @@ -27,7 +28,6 @@ use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK; use crate::util::heap::layout::Mmapper; use crate::util::heap::layout::VMMap; use crate::util::heap::space_descriptor::SpaceDescriptor; -use crate::util::heap::HeapMeta; use crate::util::memory; use crate::vm::VMBinding; @@ -370,40 +370,15 @@ pub(crate) fn print_vm_map( write!(out, "N")?; } write!(out, " ")?; - if common.contiguous { - write!( - out, - "{}->{}", - common.start, - common.start + common.extent - 1 - )?; - match common.vmrequest { - VMRequest::Extent { extent, .. } => { - write!(out, " E {}", extent)?; - } - VMRequest::Fraction { frac, .. } => { - write!(out, " F {}", frac)?; - } - _ => {} - } - } else { - let mut a = space - .get_page_resource() - .common() - .get_head_discontiguous_region(); - while !a.is_zero() { - write!( - out, - "{}->{}", - a, - a + space.common().vm_map().get_contiguous_region_size(a) - 1 - )?; - a = space.common().vm_map().get_next_contiguous_region(a); - if !a.is_zero() { - write!(out, " ")?; - } - } - } + let VMResponse { + space_id, + start, + extent, + contiguous: is_contiguous, + } = common.space_meta; + write!(out, "{}->{}", start, start + extent - 1)?; + write!(out, " E {}", extent)?; + writeln!(out)?; Ok(()) @@ -414,10 +389,10 @@ impl_downcast!(Space where VM: VMBinding); pub struct CommonSpace { pub name: &'static str, pub descriptor: SpaceDescriptor, - pub vmrequest: VMRequest, + pub space_meta: VMResponse, /// For a copying space that allows sft_trace_object(), this should be set before each GC so we know - // the copy semantics for the space. + /// the copy semantics for the space. pub copy: Option, immortal: bool, @@ -425,7 +400,15 @@ pub struct CommonSpace { pub contiguous: bool, pub zeroed: bool, + /// The lower bound of the address range of the space. + /// - If this space is contiguous, this space owns the address range + /// `start <= addr < start + extent`. + /// - If discontiguous, this space shares the address range `start <= addr < start + extent` + /// with other discontiguous spaces. This space only owns individual chunks in this range + /// managed by the `VMMap`. pub start: Address, + + /// The length of the address range of the space. See `start`. pub extent: usize, pub vm_map: &'static dyn VMMap, @@ -458,11 +441,10 @@ pub struct PolicyCreateSpaceArgs<'a, VM: VMBinding> { pub struct PlanCreateSpaceArgs<'a, VM: VMBinding> { pub name: &'static str, pub zeroed: bool, - pub vmrequest: VMRequest, + pub space_meta: VMResponse, pub global_side_metadata_specs: Vec, pub vm_map: &'static dyn VMMap, pub mmapper: &'static dyn Mmapper, - pub heap: &'a mut HeapMeta, pub constraints: &'a PlanConstraints, pub gc_trigger: Arc>, pub scheduler: Arc>, @@ -489,17 +471,33 @@ impl<'a, VM: VMBinding> PlanCreateSpaceArgs<'a, VM> { impl CommonSpace { pub fn new(args: PolicyCreateSpaceArgs) -> Self { - let mut rtn = CommonSpace { + let space_meta = args.plan_args.space_meta; + let VMResponse { + space_id: _space_id, // TODO: Let SpaceDescriptor use this space_id + start, + extent, + contiguous, + } = space_meta; + + let descriptor = if contiguous { + SpaceDescriptor::create_descriptor_from_heap_range(start, start + extent) + } else { + // TODO: `create_descriptor` simply allocates the next "space index". + // We should let it use `space_id` instead. 
+ SpaceDescriptor::create_descriptor() + }; + + let rtn = CommonSpace { name: args.plan_args.name, - descriptor: SpaceDescriptor::UNINITIALIZED, - vmrequest: args.plan_args.vmrequest, + descriptor, + space_meta, copy: None, immortal: args.immortal, movable: args.movable, - contiguous: true, + contiguous, zeroed: args.plan_args.zeroed, - start: unsafe { Address::zero() }, - extent: 0, + start, + extent, vm_map: args.plan_args.vm_map, mmapper: args.plan_args.mmapper, needs_log_bit: args.plan_args.constraints.needs_log_bit, @@ -513,79 +511,40 @@ impl CommonSpace { p: PhantomData, }; - let vmrequest = args.plan_args.vmrequest; - if vmrequest.is_discontiguous() { - rtn.contiguous = false; - // FIXME - rtn.descriptor = SpaceDescriptor::create_descriptor(); - // VM.memory.setHeapRange(index, HEAP_START, HEAP_END); - return rtn; - } - - let (extent, top) = match vmrequest { - VMRequest::Fraction { frac, top: _top } => (get_frac_available(frac), _top), - VMRequest::Extent { - extent: _extent, - top: _top, - } => (_extent, _top), - VMRequest::Fixed { - extent: _extent, .. - } => (_extent, false), - _ => unreachable!(), - }; - - assert!( - extent == raw_align_up(extent, BYTES_IN_CHUNK), - "{} requested non-aligned extent: {} bytes", - rtn.name, - extent - ); - - let start = if let VMRequest::Fixed { start: _start, .. } = vmrequest { - _start - } else { - // FIXME - //if (HeapLayout.vmMap.isFinalized()) VM.assertions.fail("heap is narrowed after regionMap is finalized: " + name); - args.plan_args.heap.reserve(extent, top) - }; - assert!( - start == chunk_align_up(start), - "{} starting on non-aligned boundary: {}", - rtn.name, - start - ); - - rtn.contiguous = true; - rtn.start = start; - rtn.extent = extent; - // FIXME - rtn.descriptor = SpaceDescriptor::create_descriptor_from_heap_range(start, start + extent); - // VM.memory.setHeapRange(index, start, start.plus(extent)); - - // We only initialize our vm map if the range of the space is in our available heap range. For normally spaces, - // they are definitely in our heap range. But for VM space, a runtime could give us an arbitrary range. We only - // insert into our vm map if the range overlaps with our heap. - { - use crate::util::heap::layout; - let overlap = - Address::range_intersection(&(start..start + extent), &layout::available_range()); - if !overlap.is_empty() { - args.plan_args.vm_map.insert( - overlap.start, - overlap.end - overlap.start, - rtn.descriptor, + if contiguous { + // If the space is contiguous, it implies that the address range + // `start <= addr < start + extent` is solely owned by one space. + // We can eagerly insert `SpaceDescriptor` entries and map metadata. + // If the space is discontiguous, we do this lazily when we allocate chunks from the + // global free list. + + // We only initialize our vm map if the range of the space is in our available heap range. For normally spaces, + // they are definitely in our heap range. But for VM space, a runtime could give us an arbitrary range. We only + // insert into our vm map if the range overlaps with our heap. + { + use crate::util::heap::layout; + let overlap = Address::range_intersection( + &(start..start + extent), + &layout::available_range(), ); + if !overlap.is_empty() { + args.plan_args.vm_map.insert( + overlap.start, + overlap.end - overlap.start, + rtn.descriptor, + ); + } } - } - // For contiguous space, we know its address range so we reserve metadata memory for its range. 
- if rtn - .metadata - .try_map_metadata_address_range(rtn.start, rtn.extent) - .is_err() - { - // TODO(Javad): handle meta space allocation failure - panic!("failed to mmap meta memory"); + // For contiguous space, we know its address range so we reserve metadata memory for its range. + if rtn + .metadata + .try_map_metadata_address_range(rtn.start, rtn.extent) + .is_err() + { + // TODO(Javad): handle meta space allocation failure + panic!("failed to mmap meta memory"); + } } debug!( @@ -620,19 +579,6 @@ impl CommonSpace { } } -fn get_frac_available(frac: f32) -> usize { - trace!("AVAILABLE_START={}", vm_layout().available_start()); - trace!("AVAILABLE_END={}", vm_layout().available_end()); - let bytes = (frac * vm_layout().available_bytes() as f32) as usize; - trace!("bytes={}*{}={}", frac, vm_layout().available_bytes(), bytes); - let mb = bytes >> LOG_BYTES_IN_MBYTE; - let rtn = mb << LOG_BYTES_IN_MBYTE; - trace!("rtn={}", rtn); - let aligned_rtn = raw_align_up(rtn, BYTES_IN_CHUNK); - trace!("aligned_rtn={}", aligned_rtn); - aligned_rtn -} - pub fn required_chunks(pages: usize) -> usize { let extent = raw_align_up(pages_to_bytes(pages), BYTES_IN_CHUNK); extent >> LOG_BYTES_IN_CHUNK diff --git a/src/util/heap/blockpageresource.rs b/src/util/heap/blockpageresource.rs index 5b3ffbc2f2..5833922eda 100644 --- a/src/util/heap/blockpageresource.rs +++ b/src/util/heap/blockpageresource.rs @@ -73,12 +73,13 @@ impl BlockPageResource { pub fn new_discontiguous( log_pages: usize, + start: Address, vm_map: &'static dyn VMMap, num_workers: usize, ) -> Self { assert!((1 << log_pages) <= PAGES_IN_CHUNK); Self { - flpr: FreeListPageResource::new_discontiguous(vm_map), + flpr: FreeListPageResource::new_discontiguous(start, vm_map), block_queue: BlockPool::new(num_workers), sync: Mutex::new(()), } diff --git a/src/util/heap/freelistpageresource.rs b/src/util/heap/freelistpageresource.rs index 756ccfda03..864503b126 100644 --- a/src/util/heap/freelistpageresource.rs +++ b/src/util/heap/freelistpageresource.rs @@ -179,6 +179,9 @@ impl PageResource for FreeListPageResource { } impl FreeListPageResource { + /// Create a contiguous free list page resource. + /// + /// The page resource will span over the address range from `start` to `start + bytes`. pub fn new_contiguous(start: Address, bytes: usize, vm_map: &'static dyn VMMap) -> Self { let pages = conversions::bytes_to_pages(bytes); let common_flpr = { @@ -209,9 +212,12 @@ impl FreeListPageResource { } } - pub fn new_discontiguous(vm_map: &'static dyn VMMap) -> Self { + /// Create a discontiguous free list page resource. + /// + /// `start` will be used as the base address for computing chunk addresses from free list + /// indices. We don't need to compute the extent here. + pub fn new_discontiguous(start: Address, vm_map: &'static dyn VMMap) -> Self { let common_flpr = { - let start = vm_layout().available_start(); let common_flpr = Box::new(CommonFreeListPageResource { free_list: vm_map.create_freelist(start), start, diff --git a/src/util/heap/heap_meta.rs b/src/util/heap/heap_meta.rs index 179d95a904..46a3948df1 100644 --- a/src/util/heap/heap_meta.rs +++ b/src/util/heap/heap_meta.rs @@ -1,46 +1,295 @@ +//! This module determines the address ranges of spaces of a plan according to the specifications +//! given by the plan. +//! +//! [`HeapMeta`] is the helper type for space placement, and is a prerequisite of creating plans. +//! It is used as following. +//! +//! 1. A plan declares all the spaces it wants to create using the `specify_space` method. 
For +//! each space, it passes a [`VMRequest`] which specifies the requirements for each space, +//! including whether the space is contiguous, whether it has a fixed extent, and whether it +//! should be placed at the low or high end of the heap range, etc. The `specify_space` +//! method returns a [`PendingVMResponse`] for each space, which can be used later. +//! 2. After all spaces are specified, the plan calls the `place_spaces` method. It determines +//! the locations (starts and extents) and contiguousness of all spaces according to the policy +//! specified by [`crate::util::heap::layout::vm_layout::vm_layout`]. +//! 3. Then the plan calls `unwrap()` on each [`PendingVMResponse`] to get a [`VMResponse`] which +//! holds the placement decision for each space (start, extent, contiguousness, etc.). +//! Using this information, the plan can create each concrete space. +//! +//! In summary, the plan specifies all spaces before [`HeapMeta`] makes placement decisions, and all +//! spaces know their locations the moment they are created. +//! +//! By doing so, we can avoid creating spaces first and then computing their start addresses and +//! mutating those spaces. JikesRVM's MMTk used to do that, but such practice is unfriendly to Rust +//! which has strict ownership and mutability rules. + +use std::cell::RefCell; +use std::ops::Range; +use std::rc::Rc; + +use crate::util::constants::LOG_BYTES_IN_MBYTE; +use crate::util::conversions::raw_align_up; use crate::util::heap::layout::vm_layout::vm_layout; +use crate::util::heap::vm_layout::BYTES_IN_CHUNK; use crate::util::Address; +/// This struct is used to determine the placement of each space during the creation of a Plan. +/// Read the module-level documentation for how to use it. +/// +/// TODO: This type needs a better name. pub struct HeapMeta { - pub heap_cursor: Address, - pub heap_limit: Address, + /// The start of the heap range (inclusive). + heap_start: Address, + /// The end of the heap range (exclusive). + heap_limit: Address, + /// The address range for discontiguous spaces (if any). + discontiguous_range: Option<Range<Address>>, + /// Request-response pairs for each space. + entries: Vec<SpaceEntry>, +} + +/// A request-response pair. +struct SpaceEntry { + req: VMRequest, + resp: PendingVMResponseWriter, +} + +/// A virtual memory (VM) request specifies the requirement for placing a space in the virtual +/// address space. It will be processed by [`HeapMeta`]. +/// +/// Note that the result of space placement (represented by [`VMResponse`]) may give the space a +/// larger address range than requested. For example, on systems with a generous address space, +/// the space placement strategy may give each space a contiguous 2TiB address space even if it +/// requests a small extent. +#[derive(Debug)] +pub enum VMRequest { + /// There is no size, location, or contiguousness requirement for the space. In a confined + /// address space, the space may be given a discontiguous address range shared with other + /// spaces; in a generous address space, the space may be given a very large contiguous address + /// range solely owned by this space. + Unrestricted, + /// Require a contiguous address range of a fixed size. + Extent { + /// The size of the space, in bytes. Must be a multiple of chunks. + extent: usize, + /// `true` if the space should be placed at the high end of the heap range; `false` if it + /// should be placed at the low end of the heap range.
+ top: bool, + }, + /// Require a contiguous address range whose size is a fraction of the total heap + /// size. + Fraction { + /// The size of the space as a fraction of the heap size. The size will be rounded to a + /// multiple of chunks. + frac: f32, + /// `true` if the space should be placed at the high end of the heap range; `false` if it + /// should be placed at the low end of the heap range. + top: bool, + }, +} + +impl VMRequest { + /// Return `true` if the current `VMRequest` is unrestricted. + fn unrestricted(&self) -> bool { + matches!(self, VMRequest::Unrestricted) + } + + /// Return `true` if the space should be placed at the high end of the address space. + fn top(&self) -> bool { + match *self { + VMRequest::Unrestricted => false, + VMRequest::Extent { top, .. } => top, + VMRequest::Fraction { top, .. } => top, + } + } +} + +/// This struct represents the placement decision of a space. +#[derive(Debug)] +pub struct VMResponse { + /// An assigned ID of the space. Guaranteed to be unique. + pub space_id: usize, + /// The start of the address range of the space. For discontiguous spaces, this range will be + /// shared with other discontiguous spaces. + pub start: Address, + /// The extent of the address range of the space. + pub extent: usize, + /// `true` if the space is contiguous. + pub contiguous: bool, +} + +impl VMResponse { + /// Create a dummy `VMResponse` for `VMSpace` because the address range of `VMSpace` is not + /// determined by `HeapMeta`. + pub(crate) fn vm_space_dummy() -> Self { + Self { + space_id: usize::MAX, + start: Address::ZERO, + extent: 0, + contiguous: false, + } + } +} + +/// A `VMResponse` that will be provided in the future. +#[derive(Clone)] +pub struct PendingVMResponse { + inner: Rc<RefCell<Option<VMResponse>>>, +} + +impl PendingVMResponse { + /// Unwrap `self` and get a `VMResponse` instance. Can only be called after calling + /// `HeapMeta::place_spaces()`. + pub fn unwrap(self) -> VMResponse { + let mut opt = self.inner.borrow_mut(); + opt.take() + .expect("Attempt to get VMResponse before calling HeapMeta::place_spaces()") + } +} + +/// The struct through which `HeapMeta` provides a `VMResponse` instance to its user. +struct PendingVMResponseWriter { + inner: Rc<RefCell<Option<VMResponse>>>, +} + +impl PendingVMResponseWriter { + fn provide(&mut self, resp: VMResponse) { + let mut opt = self.inner.borrow_mut(); + assert!(opt.is_none()); + *opt = Some(resp); + } } impl HeapMeta { + /// Create a `HeapMeta` instance. The heap range will be determined by + /// [`crate::util::heap::layout::vm_layout::vm_layout`]. pub fn new() -> Self { HeapMeta { - heap_cursor: vm_layout().heap_start, + heap_start: vm_layout().heap_start, heap_limit: vm_layout().heap_end, + entries: Vec::default(), + discontiguous_range: None, } } - pub fn reserve(&mut self, extent: usize, top: bool) -> Address { - let ret = if top { - self.heap_limit -= extent; - self.heap_limit - } else { - let start = self.heap_cursor; - self.heap_cursor += extent; - start + /// Declare a space and specify the detailed requirements.
+ pub fn specify_space(&mut self, req: VMRequest) -> PendingVMResponse { + let shared_resp = Rc::new(RefCell::new(None)); + let pending_resp = PendingVMResponse { + inner: shared_resp.clone(), }; + let resp = PendingVMResponseWriter { inner: shared_resp }; + self.entries.push(SpaceEntry { req, resp }); + pending_resp + } - assert!( - self.heap_cursor <= self.heap_limit, - "Out of virtual address space at {} ({} > {})", - self.heap_cursor - extent, - self.heap_cursor, - self.heap_limit - ); + /// Determine the locations of all specified spaces. + pub fn place_spaces(&mut self) { + let force_use_contiguous_spaces = vm_layout().force_use_contiguous_spaces; - ret - } + let mut reserver = AddressRangeReserver::new(self.heap_start, self.heap_limit); + + if force_use_contiguous_spaces { + debug!( + "Placing spaces in a generous address space: [{}, {})", + self.heap_start, self.heap_limit + ); + let extent = vm_layout().max_space_extent(); + + for (i, entry) in self.entries.iter_mut().enumerate() { + let top = entry.req.top(); + let start = reserver.reserve(extent, top); + + let resp = VMResponse { + space_id: i, + start, + extent, + contiguous: true, + }; + + debug!(" VMResponse: {:?}", resp); + entry.resp.provide(resp); + } + } else { + debug!( + "Placing spaces in a confined address space: [{}, {})", + self.heap_start, self.heap_limit + ); + for (i, entry) in self.entries.iter_mut().enumerate() { + let (start, extent) = match entry.req { + VMRequest::Unrestricted => continue, + VMRequest::Extent { extent, top } => { + let start = reserver.reserve(extent, top); + (start, extent) + } + VMRequest::Fraction { frac, top } => { + // Taken from `crate::policy::space::get_frac_available`, but we currently + // don't have any plans that actually use it. + let extent = { + trace!("AVAILABLE_START={}", self.heap_start); + trace!("AVAILABLE_END={}", self.heap_limit); + let available_bytes = self.heap_limit - self.heap_start; + let bytes = (frac * available_bytes as f32) as usize; + trace!("bytes={}*{}={}", frac, vm_layout().available_bytes(), bytes); + let mb = bytes >> LOG_BYTES_IN_MBYTE; + let rtn = mb << LOG_BYTES_IN_MBYTE; + trace!("rtn={}", rtn); + let aligned_rtn = raw_align_up(rtn, BYTES_IN_CHUNK); + trace!("aligned_rtn={}", aligned_rtn); + aligned_rtn + }; + let start = reserver.reserve(extent, top); + (start, extent) + } + }; + + let resp = VMResponse { + space_id: i, + start, + extent, + contiguous: true, + }; + + debug!(" VMResponse: {:?}", resp); + entry.resp.provide(resp); + } + + let discontig_range = reserver.remaining_range(); + self.discontiguous_range = Some(discontig_range.clone()); + let Range { + start: discontig_start, + end: discontig_end, + } = discontig_range; - pub fn get_discontig_start(&self) -> Address { - self.heap_cursor + debug!( + "Discontiguous range is [{}, {})", + discontig_start, discontig_end + ); + + let discontig_extent = discontig_end - discontig_start; + for (i, entry) in self.entries.iter_mut().enumerate() { + if !entry.req.unrestricted() { + continue; + } + + let resp = VMResponse { + space_id: i, + start: discontig_start, + extent: discontig_extent, + contiguous: false, + }; + + debug!(" VMResponse: {:?}", resp); + entry.resp.provide(resp); + } + } + + debug!("Space placement finished."); } - pub fn get_discontig_end(&self) -> Address { - self.heap_limit - 1 + /// Get the shared address range for discontigous spaces. 
+ pub fn get_discontiguous_range(&self) -> Option> { + self.discontiguous_range.clone() } } @@ -50,3 +299,46 @@ impl Default for HeapMeta { Self::new() } } + +/// A helper struct for reserving spaces from both ends of an address region. +struct AddressRangeReserver { + pub lower_bound: Address, + pub upper_bound: Address, +} + +impl AddressRangeReserver { + pub fn new(lower_bound: Address, upper_bound: Address) -> Self { + assert!(lower_bound.is_aligned_to(BYTES_IN_CHUNK)); + assert!(upper_bound.is_aligned_to(BYTES_IN_CHUNK)); + + Self { + lower_bound, + upper_bound, + } + } + + pub fn reserve(&mut self, extent: usize, top: bool) -> Address { + let ret = if top { + self.upper_bound -= extent; + self.upper_bound + } else { + let start = self.lower_bound; + self.lower_bound += extent; + start + }; + + assert!( + self.lower_bound <= self.upper_bound, + "Out of virtual address space at {} ({} > {})", + self.lower_bound - extent, + self.lower_bound, + self.upper_bound + ); + + ret + } + + pub fn remaining_range(&self) -> Range
{ + self.lower_bound..self.upper_bound + } +} diff --git a/src/util/heap/layout/map32.rs b/src/util/heap/layout/map32.rs index c4aa08f52f..9f7fc4f7fb 100644 --- a/src/util/heap/layout/map32.rs +++ b/src/util/heap/layout/map32.rs @@ -9,7 +9,6 @@ use crate::util::heap::space_descriptor::SpaceDescriptor; use crate::util::int_array_freelist::IntArrayFreeList; use crate::util::Address; use std::cell::UnsafeCell; -use std::ptr::NonNull; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Mutex, MutexGuard}; @@ -25,7 +24,6 @@ pub struct Map32Inner { region_map: IntArrayFreeList, global_page_map: IntArrayFreeList, shared_discontig_fl_count: usize, - shared_fl_map: Vec>>, total_available_discontiguous_chunks: usize, finalized: bool, descriptor_map: Vec, @@ -50,7 +48,6 @@ impl Map32 { region_map: IntArrayFreeList::new(max_chunks, max_chunks as _, 1), global_page_map: IntArrayFreeList::new(1, 1, MAX_SPACES), shared_discontig_fl_count: 0, - shared_fl_map: vec![None; MAX_SPACES], total_available_discontiguous_chunks: 0, finalized: false, descriptor_map: vec![SpaceDescriptor::UNINITIALIZED; max_chunks], @@ -107,15 +104,7 @@ impl VMMap for Map32 { Box::new(IntArrayFreeList::new(units, grain, 1)) } - unsafe fn bind_freelist(&self, pr: *const CommonFreeListPageResource) { - let ordinal: usize = (*pr) - .free_list - .downcast_ref::() - .unwrap() - .get_ordinal() as usize; - let self_mut: &mut Map32Inner = self.mut_self(); - self_mut.shared_fl_map[ordinal] = Some(NonNull::new_unchecked(pr as *mut _)); - } + unsafe fn bind_freelist(&self, _pr: *const CommonFreeListPageResource) {} unsafe fn allocate_contiguous_chunks( &self, @@ -204,24 +193,15 @@ impl VMMap for Map32 { /* establish bounds of discontiguous space */ let start_address = from; let first_chunk = start_address.chunk_index(); - let last_chunk = to.chunk_index(); + let last_byte = to - 1; + let last_chunk = last_byte.chunk_index(); let unavail_start_chunk = last_chunk + 1; let trailing_chunks = vm_layout().max_chunks() - unavail_start_chunk; let pages = (1 + last_chunk - first_chunk) * PAGES_IN_CHUNK; // start_address=0xb0000000, first_chunk=704, last_chunk=703, unavail_start_chunk=704, trailing_chunks=320, pages=0 // startAddress=0x68000000 firstChunk=416 lastChunk=703 unavailStartChunk=704 trailingChunks=320 pages=294912 self_mut.global_page_map.resize_freelist(pages, pages as _); - // TODO: Clippy favors using iter().flatten() rather than iter() with if-let. - // https://rust-lang.github.io/rust-clippy/master/index.html#manual_flatten - // Yi: I am not doing this refactoring right now, as I am not familiar with flatten() and - // there is no test to ensure the refactoring will be correct. 
- #[allow(clippy::manual_flatten)] - for fl in self_mut.shared_fl_map.iter().copied() { - if let Some(mut fl) = fl { - let fl_mut = unsafe { fl.as_mut() }; - fl_mut.resize_freelist(start_address); - } - } + // [ // 2: -1073741825 // 3: -1073741825 diff --git a/src/util/heap/mod.rs b/src/util/heap/mod.rs index e980b5adff..34d18595fe 100644 --- a/src/util/heap/mod.rs +++ b/src/util/heap/mod.rs @@ -6,17 +6,14 @@ pub(crate) mod chunk_map; pub(crate) mod externalpageresource; pub(crate) mod freelistpageresource; pub(crate) mod gc_trigger; -mod heap_meta; +pub(crate) mod heap_meta; pub(crate) mod monotonepageresource; pub(crate) mod pageresource; pub(crate) mod space_descriptor; -mod vmrequest; pub(crate) use self::accounting::PageAccounting; pub(crate) use self::blockpageresource::BlockPageResource; pub(crate) use self::freelistpageresource::FreeListPageResource; -pub(crate) use self::heap_meta::HeapMeta; pub use self::layout::vm_layout; pub(crate) use self::monotonepageresource::MonotonePageResource; pub(crate) use self::pageresource::PageResource; -pub(crate) use self::vmrequest::VMRequest; diff --git a/src/util/heap/vmrequest.rs b/src/util/heap/vmrequest.rs deleted file mode 100644 index 7a6f8148f8..0000000000 --- a/src/util/heap/vmrequest.rs +++ /dev/null @@ -1,69 +0,0 @@ -use super::layout::vm_layout::*; -use crate::util::constants::*; -use crate::util::Address; - -#[derive(Clone, Copy, Debug)] -pub enum VMRequest { - Discontiguous, - Fixed { start: Address, extent: usize }, - Extent { extent: usize, top: bool }, - Fraction { frac: f32, top: bool }, -} - -impl VMRequest { - pub fn is_discontiguous(&self) -> bool { - matches!(self, VMRequest::Discontiguous { .. }) - } - - pub fn common64bit(top: bool) -> Self { - VMRequest::Extent { - extent: vm_layout().max_space_extent(), - top, - } - } - - pub fn discontiguous() -> Self { - if cfg!(target_pointer_width = "64") && vm_layout().force_use_contiguous_spaces { - return Self::common64bit(false); - } - VMRequest::Discontiguous - } - - pub fn fixed_size(mb: usize) -> Self { - if cfg!(target_pointer_width = "64") && vm_layout().force_use_contiguous_spaces { - return Self::common64bit(false); - } - VMRequest::Extent { - extent: mb << LOG_BYTES_IN_MBYTE, - top: false, - } - } - - pub fn fraction(frac: f32) -> Self { - if cfg!(target_pointer_width = "64") && vm_layout().force_use_contiguous_spaces { - return Self::common64bit(false); - } - VMRequest::Fraction { frac, top: false } - } - - pub fn high_fixed_size(mb: usize) -> Self { - if cfg!(target_pointer_width = "64") && vm_layout().force_use_contiguous_spaces { - return Self::common64bit(true); - } - VMRequest::Extent { - extent: mb << LOG_BYTES_IN_MBYTE, - top: true, - } - } - - pub fn fixed_extent(extent: usize, top: bool) -> Self { - if cfg!(target_pointer_width = "64") && vm_layout().force_use_contiguous_spaces { - return Self::common64bit(top); - } - VMRequest::Extent { extent, top } - } - - pub fn fixed(start: Address, extent: usize) -> Self { - VMRequest::Fixed { start, extent } - } -}
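The new `heap_meta.rs` above is built around a small request/response pattern: the plan registers each space it wants, receives a handle backed by `Rc<RefCell<Option<...>>>`, and reads that handle only after all placement decisions have been made. Below is a minimal, self-contained sketch of that pattern. The `Planner`, `Pending`, and `Placement` names are illustrative placeholders invented for this example, not the actual mmtk-core types (`HeapMeta`, `PendingVMResponse`, `VMResponse`).

```rust
use std::cell::RefCell;
use std::rc::Rc;

/// Stand-in for the placement decision (`VMResponse` in the diff above).
#[derive(Debug)]
struct Placement {
    start: usize,
    extent: usize,
}

/// Read end of a pending placement (`PendingVMResponse` in the diff above).
#[derive(Clone)]
struct Pending(Rc<RefCell<Option<Placement>>>);

impl Pending {
    /// Panics if placement has not been computed yet, mirroring `PendingVMResponse::unwrap`.
    fn unwrap(self) -> Placement {
        self.0
            .borrow_mut()
            .take()
            .expect("place() has not been called yet")
    }
}

/// Stand-in for `HeapMeta`: collects requests, then resolves them all at once.
#[derive(Default)]
struct Planner {
    entries: Vec<Rc<RefCell<Option<Placement>>>>,
}

impl Planner {
    /// Like `specify_space`: record a request and hand back a pending handle.
    fn specify(&mut self) -> Pending {
        let cell = Rc::new(RefCell::new(None));
        self.entries.push(cell.clone());
        Pending(cell)
    }

    /// Like `place_spaces`: fill in every pending handle in one pass.
    fn place(&mut self, extent: usize) {
        let mut cursor = 0;
        for cell in &self.entries {
            *cell.borrow_mut() = Some(Placement { start: cursor, extent });
            cursor += extent;
        }
    }
}

fn main() {
    let mut planner = Planner::default();
    let a = planner.specify(); // declare all spaces first
    let b = planner.specify();
    planner.place(0x40_0000); // then decide every placement at once
    println!("{:?}", a.unwrap()); // only now are the handles readable
    println!("{:?}", b.unwrap());
}
```

This is the same shape as the `HeapMeta` flow introduced in this diff (specify all spaces, call `place_spaces` once, then `unwrap` each handle while constructing the concrete spaces), which is what lets spaces know their address ranges the moment they are created instead of being mutated afterwards.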