From 6e73b074df51718396ab5fe842c3f82677c90a51 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Fri, 27 Oct 2023 21:39:22 +0800 Subject: [PATCH 01/15] WIP: Basic data structures for space placing. --- src/util/heap/heap_meta.rs | 89 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 89 insertions(+) diff --git a/src/util/heap/heap_meta.rs b/src/util/heap/heap_meta.rs index 179d95a904..2e293fd90d 100644 --- a/src/util/heap/heap_meta.rs +++ b/src/util/heap/heap_meta.rs @@ -1,9 +1,87 @@ +use std::cell::RefCell; +use std::rc::Rc; + use crate::util::heap::layout::vm_layout::vm_layout; use crate::util::Address; +/// This struct is used to determine the placement of each space during the creation of a Plan. +/// +/// TODO: This type needs a better name. pub struct HeapMeta { pub heap_cursor: Address, pub heap_limit: Address, + entries: Vec, +} + +struct SpaceEntry { + spec: SpaceSpec, + promise_meta: PromiseSpaceMeta, +} + +/// This enum specifies the requirement of space placement. +/// +/// Note that the result of space placement (represented by `SpaceMeta`) may give the space a +/// larger address range than requested. For example, on systems with a generous address space, +/// the space placement strategy may give each space a contiguous 2TiB address space even if it +/// requests a small extent. +pub enum SpaceSpec { + /// There is no size or place requirement for the space. The space may be given a very large + /// contiguous or discontiguous space range of address, depending on the strategy. + DontCare, + /// Require a contiguous range of address of a fixed size. + Extent { + /// The size of the space, in bytes. Must be a multiple of chunks. + extent: usize, + /// `true` if the space should be placed at the high end of the heap range; `false` if it + /// should be placed at the low end of the heap range. + top: bool, + }, + /// Require a contiguous range of address, and its size should be a fraction of the total heap + /// size. 
+ Fraction { + /// The size of the space as a fraction of the heap size. The size will be rounded to a + /// multiple of chunks. + frac: f32, + /// `true` if the space should be placed at the high end of the heap range; `false` if it + /// should be placed at the low end of the heap range. + top: bool, + }, +} + +/// This struct represents the placement decision of a space. +pub struct SpaceMeta { + pub start: Address, + pub extent: usize, + pub is_contiguous: bool, +} + +/// A space meta that will be provided in the future. +#[derive(Clone)] +pub struct FutureSpaceMeta { + inner: Rc>>, +} + +impl FutureSpaceMeta { + /// Unwrap `self` and get a `SpaceMeta` instance. Can only be called after calling + /// `HeapMeta::place_spaces()`. + pub fn unwrap(self) -> SpaceMeta { + let mut opt = self.inner.borrow_mut(); + opt.take() + .expect("Attempt to get SpaceMeta before calling HeapMeta::place_spaces()") + } +} + +/// The struct for HeapMeta to provide a SpaceMeta instance for its user. +struct PromiseSpaceMeta { + inner: Rc>>, +} + +impl PromiseSpaceMeta { + fn provide(&mut self, space_meta: SpaceMeta) { + let mut opt = self.inner.borrow_mut(); + assert!(opt.is_none()); + *opt = Some(space_meta); + } } impl HeapMeta { @@ -11,9 +89,20 @@ impl HeapMeta { HeapMeta { heap_cursor: vm_layout().heap_start, heap_limit: vm_layout().heap_end, + entries: Vec::default(), } } + pub fn specify_space(&mut self, spec: SpaceSpec) -> FutureSpaceMeta { + let shared_meta = Rc::new(RefCell::new(None)); + let future_meta = FutureSpaceMeta { + inner: shared_meta.clone(), + }; + let promise_meta = PromiseSpaceMeta { inner: shared_meta }; + self.entries.push(SpaceEntry { spec, promise_meta }); + future_meta + } + pub fn reserve(&mut self, extent: usize, top: bool) -> Address { let ret = if top { self.heap_limit -= extent; From fcdcf1711468b7c87c75db0bc03731d1d691ba7c Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Fri, 27 Oct 2023 22:11:19 +0800 Subject: [PATCH 02/15] WIP: Basic space-placing 
algorithm. --- src/mmtk.rs | 2 +- src/policy/space.rs | 3 +- src/util/heap/heap_meta.rs | 139 +++++++++++++++++++++++++++++++------ 3 files changed, 119 insertions(+), 25 deletions(-) diff --git a/src/mmtk.rs b/src/mmtk.rs index 12f3e77cba..e6c6f536b8 100644 --- a/src/mmtk.rs +++ b/src/mmtk.rs @@ -189,7 +189,7 @@ impl MMTK { // TODO: This probably does not work if we have multiple MMTk instances. VM_MAP.boot(); // This needs to be called after we create Plan. It needs to use HeapMeta, which is gradually built when we create spaces. - VM_MAP.finalize_static_space_map(heap.get_discontig_start(), heap.get_discontig_end()); + //VM_MAP.finalize_static_space_map(heap.get_discontig_start(), heap.get_discontig_end()); if *options.transparent_hugepages { MMAPPER.set_mmap_strategy(crate::util::memory::MmapStrategy::TransparentHugePages); diff --git a/src/policy/space.rs b/src/policy/space.rs index 56ae00f8af..1b6b46bf96 100644 --- a/src/policy/space.rs +++ b/src/policy/space.rs @@ -546,7 +546,8 @@ impl CommonSpace { } else { // FIXME //if (HeapLayout.vmMap.isFinalized()) VM.assertions.fail("heap is narrowed after regionMap is finalized: " + name); - args.plan_args.heap.reserve(extent, top) + //args.plan_args.heap.reserve(extent, top) + todo!() }; assert!( start == chunk_align_up(start), diff --git a/src/util/heap/heap_meta.rs b/src/util/heap/heap_meta.rs index 2e293fd90d..ca40652cf2 100644 --- a/src/util/heap/heap_meta.rs +++ b/src/util/heap/heap_meta.rs @@ -2,13 +2,14 @@ use std::cell::RefCell; use std::rc::Rc; use crate::util::heap::layout::vm_layout::vm_layout; +use crate::util::heap::vm_layout::BYTES_IN_CHUNK; use crate::util::Address; /// This struct is used to determine the placement of each space during the creation of a Plan. /// /// TODO: This type needs a better name. 
pub struct HeapMeta { - pub heap_cursor: Address, + pub heap_start: Address, pub heap_limit: Address, entries: Vec, } @@ -48,8 +49,23 @@ pub enum SpaceSpec { }, } +impl SpaceSpec { + fn dont_care(&self) -> bool { + matches!(self, SpaceSpec::DontCare) + } + + fn top(&self) -> bool { + match *self { + SpaceSpec::DontCare => false, + SpaceSpec::Extent { top, .. } => top, + SpaceSpec::Fraction { top, .. } => top, + } + } +} + /// This struct represents the placement decision of a space. pub struct SpaceMeta { + pub space_id: usize, pub start: Address, pub extent: usize, pub is_contiguous: bool, @@ -87,7 +103,7 @@ impl PromiseSpaceMeta { impl HeapMeta { pub fn new() -> Self { HeapMeta { - heap_cursor: vm_layout().heap_start, + heap_start: vm_layout().heap_start, heap_limit: vm_layout().heap_end, entries: Vec::default(), } @@ -103,39 +119,116 @@ impl HeapMeta { future_meta } + pub fn place_spaces(&mut self) { + let force_use_contiguous_spaces = vm_layout().force_use_contiguous_spaces; + + let mut reserver = AddressRangeReserver::new(self.heap_start, self.heap_limit); + + if force_use_contiguous_spaces { + let extent = vm_layout().max_space_extent(); + + for (i, entry) in self.entries.iter_mut().enumerate() { + let top = entry.spec.top(); + let start = reserver.reserve(extent, top); + + let meta = SpaceMeta { + space_id: i, + start, + extent, + is_contiguous: true, + }; + + entry.promise_meta.provide(meta); + } + } else { + for (i, entry) in self.entries.iter_mut().enumerate() { + let (start, extent) = match entry.spec { + SpaceSpec::DontCare => continue, + SpaceSpec::Extent { extent, top } => { + let start = reserver.reserve(extent, top); + (start, extent) + } + SpaceSpec::Fraction { .. 
} => { + todo!("Currently none of our plans require spaces of a fraction of the address space.") + } + }; + + let meta = SpaceMeta { + space_id: i, + start, + extent, + is_contiguous: true, + }; + + entry.promise_meta.provide(meta); + } + + let (discontig_start, discontig_end) = reserver.remaining_range(); + let discontig_extent = discontig_end - discontig_start; + for (i, entry) in self.entries.iter_mut().enumerate() { + if !entry.spec.dont_care() { + continue; + } + + let meta = SpaceMeta { + space_id: i, + start: discontig_start, + extent: discontig_extent, + is_contiguous: false, + }; + + entry.promise_meta.provide(meta); + } + } + } +} + +// make clippy happy +impl Default for HeapMeta { + fn default() -> Self { + Self::new() + } +} + +/// A helper struct for reserving spaces from both ends of an address region. +struct AddressRangeReserver { + pub lower_bound: Address, + pub upper_bound: Address, +} + +impl AddressRangeReserver { + pub fn new(lower_bound: Address, upper_bound: Address) -> Self { + assert!(lower_bound.is_aligned_to(BYTES_IN_CHUNK)); + assert!(upper_bound.is_aligned_to(BYTES_IN_CHUNK)); + + Self { + lower_bound, + upper_bound, + } + } + pub fn reserve(&mut self, extent: usize, top: bool) -> Address { let ret = if top { - self.heap_limit -= extent; - self.heap_limit + self.upper_bound -= extent; + self.upper_bound } else { - let start = self.heap_cursor; - self.heap_cursor += extent; + let start = self.lower_bound; + self.lower_bound += extent; start }; assert!( - self.heap_cursor <= self.heap_limit, + self.lower_bound <= self.upper_bound, "Out of virtual address space at {} ({} > {})", - self.heap_cursor - extent, - self.heap_cursor, - self.heap_limit + self.lower_bound - extent, + self.lower_bound, + self.upper_bound ); ret } - pub fn get_discontig_start(&self) -> Address { - self.heap_cursor - } - - pub fn get_discontig_end(&self) -> Address { - self.heap_limit - 1 - } -} - -// make clippy happy -impl Default for HeapMeta { - fn default() -> 
Self { - Self::new() + pub fn remaining_range(&self) -> (Address, Address) { + (self.lower_bound, self.upper_bound) } } From f6cdcafa08ab25f6cf0bcf8b75ef8de5eddc008e Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Fri, 27 Oct 2023 22:37:09 +0800 Subject: [PATCH 03/15] WIP: Refactor CommonSpace --- src/policy/space.rs | 139 ++++++++----------------------------- src/util/heap/heap_meta.rs | 30 ++++++-- src/util/heap/mod.rs | 3 +- 3 files changed, 55 insertions(+), 117 deletions(-) diff --git a/src/policy/space.rs b/src/policy/space.rs index 1b6b46bf96..6a988566b9 100644 --- a/src/policy/space.rs +++ b/src/policy/space.rs @@ -2,18 +2,19 @@ use crate::global_state::GlobalState; use crate::plan::PlanConstraints; use crate::scheduler::GCWorkScheduler; use crate::util::conversions::*; +use crate::util::heap::heap_meta::SpaceMeta; use crate::util::metadata::side_metadata::{ SideMetadataContext, SideMetadataSanity, SideMetadataSpec, }; use crate::util::Address; use crate::util::ObjectReference; -use crate::util::heap::layout::vm_layout::{vm_layout, LOG_BYTES_IN_CHUNK}; -use crate::util::heap::{PageResource, VMRequest}; +use crate::util::heap::layout::vm_layout::LOG_BYTES_IN_CHUNK; +use crate::util::heap::PageResource; use crate::util::options::Options; use crate::vm::{ActivePlan, Collection}; -use crate::util::constants::{LOG_BYTES_IN_MBYTE, LOG_BYTES_IN_PAGE}; +use crate::util::constants::LOG_BYTES_IN_PAGE; use crate::util::conversions; use crate::util::opaque_pointer::*; @@ -27,7 +28,6 @@ use crate::util::heap::layout::vm_layout::BYTES_IN_CHUNK; use crate::util::heap::layout::Mmapper; use crate::util::heap::layout::VMMap; use crate::util::heap::space_descriptor::SpaceDescriptor; -use crate::util::heap::HeapMeta; use crate::util::memory; use crate::vm::VMBinding; @@ -370,40 +370,15 @@ pub(crate) fn print_vm_map( write!(out, "N")?; } write!(out, " ")?; - if common.contiguous { - write!( - out, - "{}->{}", - common.start, - common.start + common.extent - 1 - )?; - match 
common.vmrequest { - VMRequest::Extent { extent, .. } => { - write!(out, " E {}", extent)?; - } - VMRequest::Fraction { frac, .. } => { - write!(out, " F {}", frac)?; - } - _ => {} - } - } else { - let mut a = space - .get_page_resource() - .common() - .get_head_discontiguous_region(); - while !a.is_zero() { - write!( - out, - "{}->{}", - a, - a + space.common().vm_map().get_contiguous_region_size(a) - 1 - )?; - a = space.common().vm_map().get_next_contiguous_region(a); - if !a.is_zero() { - write!(out, " ")?; - } - } - } + let SpaceMeta { + space_id, + start, + extent, + contiguous: is_contiguous, + } = common.space_meta; + write!(out, "{}->{}", start, start + extent - 1)?; + write!(out, " E {}", extent)?; + writeln!(out)?; Ok(()) @@ -414,7 +389,7 @@ impl_downcast!(Space where VM: VMBinding); pub struct CommonSpace { pub name: &'static str, pub descriptor: SpaceDescriptor, - pub vmrequest: VMRequest, + pub space_meta: SpaceMeta, /// For a copying space that allows sft_trace_object(), this should be set before each GC so we know // the copy semantics for the space. 
@@ -458,11 +433,10 @@ pub struct PolicyCreateSpaceArgs<'a, VM: VMBinding> { pub struct PlanCreateSpaceArgs<'a, VM: VMBinding> { pub name: &'static str, pub zeroed: bool, - pub vmrequest: VMRequest, + pub space_meta: SpaceMeta, pub global_side_metadata_specs: Vec, pub vm_map: &'static dyn VMMap, pub mmapper: &'static dyn Mmapper, - pub heap: &'a mut HeapMeta, pub constraints: &'a PlanConstraints, pub gc_trigger: Arc>, pub scheduler: Arc>, @@ -489,17 +463,27 @@ impl<'a, VM: VMBinding> PlanCreateSpaceArgs<'a, VM> { impl CommonSpace { pub fn new(args: PolicyCreateSpaceArgs) -> Self { + let space_meta = args.plan_args.space_meta; + let SpaceMeta { + space_id, + start, + extent, + contiguous, + } = space_meta; + + let descriptor = SpaceDescriptor::create_descriptor_from_heap_range(start, start + extent); + let mut rtn = CommonSpace { name: args.plan_args.name, - descriptor: SpaceDescriptor::UNINITIALIZED, - vmrequest: args.plan_args.vmrequest, + descriptor, + space_meta, copy: None, immortal: args.immortal, movable: args.movable, - contiguous: true, + contiguous, zeroed: args.plan_args.zeroed, - start: unsafe { Address::zero() }, - extent: 0, + start, + extent, vm_map: args.plan_args.vm_map, mmapper: args.plan_args.mmapper, needs_log_bit: args.plan_args.constraints.needs_log_bit, @@ -513,56 +497,6 @@ impl CommonSpace { p: PhantomData, }; - let vmrequest = args.plan_args.vmrequest; - if vmrequest.is_discontiguous() { - rtn.contiguous = false; - // FIXME - rtn.descriptor = SpaceDescriptor::create_descriptor(); - // VM.memory.setHeapRange(index, HEAP_START, HEAP_END); - return rtn; - } - - let (extent, top) = match vmrequest { - VMRequest::Fraction { frac, top: _top } => (get_frac_available(frac), _top), - VMRequest::Extent { - extent: _extent, - top: _top, - } => (_extent, _top), - VMRequest::Fixed { - extent: _extent, .. 
- } => (_extent, false), - _ => unreachable!(), - }; - - assert!( - extent == raw_align_up(extent, BYTES_IN_CHUNK), - "{} requested non-aligned extent: {} bytes", - rtn.name, - extent - ); - - let start = if let VMRequest::Fixed { start: _start, .. } = vmrequest { - _start - } else { - // FIXME - //if (HeapLayout.vmMap.isFinalized()) VM.assertions.fail("heap is narrowed after regionMap is finalized: " + name); - //args.plan_args.heap.reserve(extent, top) - todo!() - }; - assert!( - start == chunk_align_up(start), - "{} starting on non-aligned boundary: {}", - rtn.name, - start - ); - - rtn.contiguous = true; - rtn.start = start; - rtn.extent = extent; - // FIXME - rtn.descriptor = SpaceDescriptor::create_descriptor_from_heap_range(start, start + extent); - // VM.memory.setHeapRange(index, start, start.plus(extent)); - // We only initialize our vm map if the range of the space is in our available heap range. For normally spaces, // they are definitely in our heap range. But for VM space, a runtime could give us an arbitrary range. We only // insert into our vm map if the range overlaps with our heap. 
@@ -621,19 +555,6 @@ impl CommonSpace { } } -fn get_frac_available(frac: f32) -> usize { - trace!("AVAILABLE_START={}", vm_layout().available_start()); - trace!("AVAILABLE_END={}", vm_layout().available_end()); - let bytes = (frac * vm_layout().available_bytes() as f32) as usize; - trace!("bytes={}*{}={}", frac, vm_layout().available_bytes(), bytes); - let mb = bytes >> LOG_BYTES_IN_MBYTE; - let rtn = mb << LOG_BYTES_IN_MBYTE; - trace!("rtn={}", rtn); - let aligned_rtn = raw_align_up(rtn, BYTES_IN_CHUNK); - trace!("aligned_rtn={}", aligned_rtn); - aligned_rtn -} - pub fn required_chunks(pages: usize) -> usize { let extent = raw_align_up(pages_to_bytes(pages), BYTES_IN_CHUNK); extent >> LOG_BYTES_IN_CHUNK diff --git a/src/util/heap/heap_meta.rs b/src/util/heap/heap_meta.rs index ca40652cf2..4b007283ee 100644 --- a/src/util/heap/heap_meta.rs +++ b/src/util/heap/heap_meta.rs @@ -1,6 +1,8 @@ use std::cell::RefCell; use std::rc::Rc; +use crate::util::constants::LOG_BYTES_IN_MBYTE; +use crate::util::conversions::raw_align_up; use crate::util::heap::layout::vm_layout::vm_layout; use crate::util::heap::vm_layout::BYTES_IN_CHUNK; use crate::util::Address; @@ -68,7 +70,7 @@ pub struct SpaceMeta { pub space_id: usize, pub start: Address, pub extent: usize, - pub is_contiguous: bool, + pub contiguous: bool, } /// A space meta that will be provided in the future. @@ -135,7 +137,7 @@ impl HeapMeta { space_id: i, start, extent, - is_contiguous: true, + contiguous: true, }; entry.promise_meta.provide(meta); @@ -148,8 +150,24 @@ impl HeapMeta { let start = reserver.reserve(extent, top); (start, extent) } - SpaceSpec::Fraction { .. } => { - todo!("Currently none of our plans require spaces of a fraction of the address space.") + SpaceSpec::Fraction { frac, top } => { + // Taken from `crate::policy::space::get_frac_available`, but we currently + // don't have any plans that actually uses it. 
+ let extent = { + trace!("AVAILABLE_START={}", self.heap_start); + trace!("AVAILABLE_END={}", self.heap_limit); + let available_bytes = self.heap_limit - self.heap_start; + let bytes = (frac * available_bytes as f32) as usize; + trace!("bytes={}*{}={}", frac, vm_layout().available_bytes(), bytes); + let mb = bytes >> LOG_BYTES_IN_MBYTE; + let rtn = mb << LOG_BYTES_IN_MBYTE; + trace!("rtn={}", rtn); + let aligned_rtn = raw_align_up(rtn, BYTES_IN_CHUNK); + trace!("aligned_rtn={}", aligned_rtn); + aligned_rtn + }; + let start = reserver.reserve(extent, top); + (start, extent) } }; @@ -157,7 +175,7 @@ impl HeapMeta { space_id: i, start, extent, - is_contiguous: true, + contiguous: true, }; entry.promise_meta.provide(meta); @@ -174,7 +192,7 @@ impl HeapMeta { space_id: i, start: discontig_start, extent: discontig_extent, - is_contiguous: false, + contiguous: false, }; entry.promise_meta.provide(meta); diff --git a/src/util/heap/mod.rs b/src/util/heap/mod.rs index e980b5adff..f3cf1ff12b 100644 --- a/src/util/heap/mod.rs +++ b/src/util/heap/mod.rs @@ -6,7 +6,7 @@ pub(crate) mod chunk_map; pub(crate) mod externalpageresource; pub(crate) mod freelistpageresource; pub(crate) mod gc_trigger; -mod heap_meta; +pub(crate) mod heap_meta; pub(crate) mod monotonepageresource; pub(crate) mod pageresource; pub(crate) mod space_descriptor; @@ -15,7 +15,6 @@ mod vmrequest; pub(crate) use self::accounting::PageAccounting; pub(crate) use self::blockpageresource::BlockPageResource; pub(crate) use self::freelistpageresource::FreeListPageResource; -pub(crate) use self::heap_meta::HeapMeta; pub use self::layout::vm_layout; pub(crate) use self::monotonepageresource::MonotonePageResource; pub(crate) use self::pageresource::PageResource; From 5ae4e3cc302c29be655163b38983d8bcd0c83028 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Fri, 27 Oct 2023 22:42:06 +0800 Subject: [PATCH 04/15] WIP: Spaces no longer refer to VMRequest --- src/plan/global.rs | 7 +++---- src/policy/copyspace.rs | 2 +- 
src/policy/immix/immixspace.rs | 2 +- src/policy/immortalspace.rs | 4 ++-- src/policy/largeobjectspace.rs | 2 +- src/policy/markcompactspace.rs | 2 +- src/policy/marksweepspace/native_ms/global.rs | 2 +- 7 files changed, 10 insertions(+), 11 deletions(-) diff --git a/src/plan/global.rs b/src/plan/global.rs index d99017a30f..c2780f7ed4 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -14,9 +14,9 @@ use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::{CopyConfig, GCWorkerCopyContext}; use crate::util::heap::gc_trigger::GCTrigger; +use crate::util::heap::heap_meta::{SpaceMeta, HeapMeta}; use crate::util::heap::layout::Mmapper; use crate::util::heap::layout::VMMap; -use crate::util::heap::HeapMeta; use crate::util::heap::VMRequest; use crate::util::metadata::side_metadata::SideMetadataSanity; use crate::util::metadata::side_metadata::SideMetadataSpec; @@ -366,16 +366,15 @@ impl<'a, VM: VMBinding> CreateSpecificPlanArgs<'a, VM> { &mut self, name: &'static str, zeroed: bool, - vmrequest: VMRequest, + space_meta: SpaceMeta, ) -> PlanCreateSpaceArgs { PlanCreateSpaceArgs { name, zeroed, - vmrequest, + space_meta, global_side_metadata_specs: self.global_side_metadata_specs.clone(), vm_map: self.global_args.vm_map, mmapper: self.global_args.mmapper, - heap: self.global_args.heap, constraints: self.constraints, gc_trigger: self.global_args.gc_trigger.clone(), scheduler: self.global_args.scheduler.clone(), diff --git a/src/policy/copyspace.rs b/src/policy/copyspace.rs index 8d08ec1507..5ff5c0ed74 100644 --- a/src/policy/copyspace.rs +++ b/src/policy/copyspace.rs @@ -144,7 +144,7 @@ impl crate::policy::gc_work::PolicyTraceObject for CopySpace< impl CopySpace { pub fn new(args: crate::policy::space::PlanCreateSpaceArgs, from_space: bool) -> Self { let vm_map = args.vm_map; - let is_discontiguous = args.vmrequest.is_discontiguous(); + let is_discontiguous = !args.space_meta.contiguous; let common = 
CommonSpace::new(args.into_policy_args( true, false, diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index 3809f7bd24..ef1e7fee2c 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -288,7 +288,7 @@ impl ImmixSpace { let common = CommonSpace::new(args.into_policy_args(true, false, Self::side_metadata_specs())); ImmixSpace { - pr: if common.vmrequest.is_discontiguous() { + pr: if !common.space_meta.contiguous { BlockPageResource::new_discontiguous( Block::LOG_PAGES, vm_map, diff --git a/src/policy/immortalspace.rs b/src/policy/immortalspace.rs index 5eeebd58c9..7fa8e127c8 100644 --- a/src/policy/immortalspace.rs +++ b/src/policy/immortalspace.rs @@ -121,7 +121,7 @@ impl crate::policy::gc_work::PolicyTraceObject for ImmortalSp impl ImmortalSpace { pub fn new(args: crate::policy::space::PlanCreateSpaceArgs) -> Self { let vm_map = args.vm_map; - let is_discontiguous = args.vmrequest.is_discontiguous(); + let is_discontiguous = !args.space_meta.contiguous; let common = CommonSpace::new(args.into_policy_args( false, true, @@ -145,7 +145,7 @@ impl ImmortalSpace { start: Address, size: usize, ) -> Self { - assert!(!args.vmrequest.is_discontiguous()); + assert!(!!args.space_meta.contiguous); ImmortalSpace { mark_state: MarkState::new(), pr: MonotonePageResource::new_contiguous(start, size, args.vm_map), diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index ec6b2f7506..15307b6743 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -143,7 +143,7 @@ impl LargeObjectSpace { args: crate::policy::space::PlanCreateSpaceArgs, protect_memory_on_release: bool, ) -> Self { - let is_discontiguous = args.vmrequest.is_discontiguous(); + let is_discontiguous = !args.space_meta.contiguous; let vm_map = args.vm_map; let common = CommonSpace::new(args.into_policy_args( false, diff --git a/src/policy/markcompactspace.rs b/src/policy/markcompactspace.rs index 
693218b492..352fc80f7a 100644 --- a/src/policy/markcompactspace.rs +++ b/src/policy/markcompactspace.rs @@ -201,7 +201,7 @@ impl MarkCompactSpace { pub fn new(args: crate::policy::space::PlanCreateSpaceArgs) -> Self { let vm_map = args.vm_map; - let is_discontiguous = args.vmrequest.is_discontiguous(); + let is_discontiguous = !args.space_meta.contiguous; let local_specs = extract_side_metadata(&[*VM::VMObjectModel::LOCAL_MARK_BIT_SPEC]); let common = CommonSpace::new(args.into_policy_args(true, false, local_specs)); MarkCompactSpace { diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs index 8d8eae7d0e..fdc820911f 100644 --- a/src/policy/marksweepspace/native_ms/global.rs +++ b/src/policy/marksweepspace/native_ms/global.rs @@ -200,7 +200,7 @@ impl MarkSweepSpace { pub fn new(args: crate::policy::space::PlanCreateSpaceArgs) -> MarkSweepSpace { let scheduler = args.scheduler.clone(); let vm_map = args.vm_map; - let is_discontiguous = args.vmrequest.is_discontiguous(); + let is_discontiguous = !args.space_meta.contiguous; let local_specs = { metadata::extract_side_metadata(&vec![ MetadataSpec::OnSide(Block::NEXT_BLOCK_TABLE), From 189122285e3e3719161a3496588f5dd8af2dc92d Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Fri, 27 Oct 2023 23:07:59 +0800 Subject: [PATCH 05/15] WIP: Plan create spaces in two steps --- src/mmtk.rs | 2 +- src/plan/generational/copying/global.rs | 16 +++++++--- src/plan/generational/global.rs | 18 ++++++++--- src/plan/generational/immix/global.rs | 14 +++++++-- src/plan/global.rs | 41 +++++++++++++++++-------- src/plan/immix/global.rs | 13 ++++++-- src/plan/markcompact/global.rs | 16 ++++++---- src/plan/marksweep/global.rs | 13 ++++++-- src/plan/nogc/global.rs | 22 ++++++++----- src/plan/pageprotect/global.rs | 13 ++++++-- src/plan/semispace/global.rs | 16 +++++++--- src/util/heap/heap_meta.rs | 12 ++++++++ 12 files changed, 145 insertions(+), 51 deletions(-) diff --git a/src/mmtk.rs 
b/src/mmtk.rs index e6c6f536b8..46693df401 100644 --- a/src/mmtk.rs +++ b/src/mmtk.rs @@ -12,9 +12,9 @@ use crate::util::analysis::AnalysisManager; use crate::util::edge_logger::EdgeLogger; use crate::util::finalizable_processor::FinalizableProcessor; use crate::util::heap::gc_trigger::GCTrigger; +use crate::util::heap::heap_meta::HeapMeta; use crate::util::heap::layout::vm_layout::VMLayout; use crate::util::heap::layout::{self, Mmapper, VMMap}; -use crate::util::heap::HeapMeta; use crate::util::opaque_pointer::*; use crate::util::options::Options; use crate::util::reference_processor::ReferenceProcessors; diff --git a/src/plan/generational/copying/global.rs b/src/plan/generational/copying/global.rs index c0cb53bfde..e01fc6452e 100644 --- a/src/plan/generational/copying/global.rs +++ b/src/plan/generational/copying/global.rs @@ -16,10 +16,10 @@ use crate::policy::space::Space; use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::*; -use crate::util::heap::VMRequest; use crate::util::Address; use crate::util::ObjectReference; use crate::util::VMWorkerThread; +use crate::util::heap::heap_meta::SpaceSpec; use crate::vm::*; use crate::ObjectQueue; use enum_map::EnumMap; @@ -200,17 +200,25 @@ impl GenCopy { crate::plan::generational::new_generational_global_metadata_specs::(), }; + let heap_meta = plan_args.global_args.heap; + + let copyspace0_spec = heap_meta.specify_space(SpaceSpec::DontCare); + let copyspace1_spec = heap_meta.specify_space(SpaceSpec::DontCare); + + // Spaces will eventually be placed by `BasePlan`. 
+ let gen = CommonGenPlan::new(&mut plan_args); + let copyspace0 = CopySpace::new( - plan_args.get_space_args("copyspace0", true, VMRequest::discontiguous()), + plan_args.get_space_args("copyspace0", true, copyspace0_spec.unwrap()), false, ); let copyspace1 = CopySpace::new( - plan_args.get_space_args("copyspace1", true, VMRequest::discontiguous()), + plan_args.get_space_args("copyspace1", true, copyspace1_spec.unwrap()), true, ); let res = GenCopy { - gen: CommonGenPlan::new(plan_args), + gen, hi: AtomicBool::new(false), copyspace0, copyspace1, diff --git a/src/plan/generational/global.rs b/src/plan/generational/global.rs index e12430f576..370c9047e2 100644 --- a/src/plan/generational/global.rs +++ b/src/plan/generational/global.rs @@ -6,7 +6,7 @@ use crate::policy::copyspace::CopySpace; use crate::policy::space::Space; use crate::scheduler::*; use crate::util::copy::CopySemantics; -use crate::util::heap::VMRequest; +use crate::util::heap::heap_meta::SpaceSpec; use crate::util::statistics::counter::EventCounter; use crate::util::Address; use crate::util::ObjectReference; @@ -37,20 +37,30 @@ pub struct CommonGenPlan { } impl CommonGenPlan { - pub fn new(mut args: CreateSpecificPlanArgs) -> Self { + pub fn new(args: &mut CreateSpecificPlanArgs) -> Self { + let heap_meta = args.global_args.heap; + + let nursery_spec = heap_meta.specify_space(SpaceSpec::Extent { + extent: args.global_args.options.get_max_nursery_bytes(), + top: false, + }); + + // Spaces will eventually be placed by `BasePlan`. 
+ let common = CommonPlan::new(args); + let nursery = CopySpace::new( args.get_space_args( "nursery", true, - VMRequest::fixed_extent(args.global_args.options.get_max_nursery_bytes(), false), + nursery_spec.unwrap(), ), true, ); + let full_heap_gc_count = args .global_args .stats .new_event_counter("majorGC", true, true); - let common = CommonPlan::new(args); CommonGenPlan { nursery, diff --git a/src/plan/generational/immix/global.rs b/src/plan/generational/immix/global.rs index f1e54cf058..4d34299246 100644 --- a/src/plan/generational/immix/global.rs +++ b/src/plan/generational/immix/global.rs @@ -17,10 +17,10 @@ use crate::scheduler::GCWorkScheduler; use crate::scheduler::GCWorker; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::*; -use crate::util::heap::VMRequest; use crate::util::Address; use crate::util::ObjectReference; use crate::util::VMWorkerThread; +use crate::util::heap::heap_meta::SpaceSpec; use crate::vm::*; use crate::ObjectQueue; @@ -231,8 +231,16 @@ impl GenImmix { global_side_metadata_specs: crate::plan::generational::new_generational_global_metadata_specs::(), }; + + let heap_meta = args.heap; + + let immix_space_spec = heap_meta.specify_space(SpaceSpec::DontCare); + + // Spaces will eventually be placed by `BasePlan`. + let gen = CommonGenPlan::new(&mut plan_args); + let immix_space = ImmixSpace::new( - plan_args.get_space_args("immix_mature", true, VMRequest::discontiguous()), + plan_args.get_space_args("immix_mature", true, immix_space_spec.unwrap()), ImmixSpaceArgs { reset_log_bit_in_major_gc: false, // We don't need to unlog objects at tracing. Instead, we unlog objects at copying. 
@@ -244,7 +252,7 @@ impl GenImmix { ); let genimmix = GenImmix { - gen: CommonGenPlan::new(plan_args), + gen, immix_space, last_gc_was_defrag: AtomicBool::new(false), last_gc_was_full_heap: AtomicBool::new(false), diff --git a/src/plan/global.rs b/src/plan/global.rs index c2780f7ed4..a35c44aa78 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -14,10 +14,9 @@ use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::{CopyConfig, GCWorkerCopyContext}; use crate::util::heap::gc_trigger::GCTrigger; -use crate::util::heap::heap_meta::{SpaceMeta, HeapMeta}; +use crate::util::heap::heap_meta::{HeapMeta, SpaceMeta, SpaceSpec}; use crate::util::heap::layout::Mmapper; use crate::util::heap::layout::VMMap; -use crate::util::heap::VMRequest; use crate::util::metadata::side_metadata::SideMetadataSanity; use crate::util::metadata::side_metadata::SideMetadataSpec; use crate::util::options::Options; @@ -387,30 +386,43 @@ impl<'a, VM: VMBinding> CreateSpecificPlanArgs<'a, VM> { impl BasePlan { #[allow(unused_mut)] // 'args' only needs to be mutable for certain features pub fn new(mut args: CreateSpecificPlanArgs) -> BasePlan { + let heap_meta = args.global_args.heap; + + #[cfg(feature = "code_space")] + let code_space_spec = heap_meta.specify_space(SpaceSpec::DontCare); + #[cfg(feature = "code_space")] + let code_lo_space_spec = heap_meta.specify_space(SpaceSpec::DontCare); + #[cfg(feature = "ro_space")] + let ro_space_spec = heap_meta.specify_space(SpaceSpec::DontCare); + // NOTE: We don't specify VM space because it doesn't use SpaceMeta anyway. + + // BasePlan does not have any nested structs with spaces. We now place spaces. 
+ heap_meta.place_spaces(); + BasePlan { #[cfg(feature = "code_space")] code_space: ImmortalSpace::new(args.get_space_args( "code_space", true, - VMRequest::discontiguous(), + code_space_spec.unwrap(), )), #[cfg(feature = "code_space")] code_lo_space: ImmortalSpace::new(args.get_space_args( "code_lo_space", true, - VMRequest::discontiguous(), + code_space_spec.unwrap(), )), #[cfg(feature = "ro_space")] ro_space: ImmortalSpace::new(args.get_space_args( "ro_space", true, - VMRequest::discontiguous(), + code_space_spec.unwrap(), )), #[cfg(feature = "vm_space")] vm_space: VMSpace::new(args.get_space_args( "vm_space", false, - VMRequest::discontiguous(), + SpaceMeta::dummy(), )), global_state: args.global_args.state.clone(), @@ -550,20 +562,25 @@ pub struct CommonPlan { impl CommonPlan { pub fn new(mut args: CreateSpecificPlanArgs) -> CommonPlan { + let heap_meta = args.global_args.heap; + + let immortal_spec = heap_meta.specify_space(SpaceSpec::DontCare); + let los_spec = heap_meta.specify_space(SpaceSpec::DontCare); + let nonmoving_spec = heap_meta.specify_space(SpaceSpec::DontCare); + + let base = BasePlan::new(args); + CommonPlan { immortal: ImmortalSpace::new(args.get_space_args( "immortal", true, - VMRequest::discontiguous(), + immortal_spec.unwrap(), )), - los: LargeObjectSpace::new( - args.get_space_args("los", true, VMRequest::discontiguous()), - false, - ), + los: LargeObjectSpace::new(args.get_space_args("los", true, los_spec.unwrap()), false), nonmoving: ImmortalSpace::new(args.get_space_args( "nonmoving", true, - VMRequest::discontiguous(), + nonmoving_spec.unwrap(), )), base: BasePlan::new(args), } diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs index 3b1d6dfbd2..80d6fea1ed 100644 --- a/src/plan/immix/global.rs +++ b/src/plan/immix/global.rs @@ -13,7 +13,7 @@ use crate::policy::space::Space; use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::*; -use crate::util::heap::VMRequest; +use 
crate::util::heap::heap_meta::SpaceSpec; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::vm::VMBinding; use crate::{policy::immix::ImmixSpace, util::opaque_pointer::VMWorkerThread}; @@ -140,12 +140,19 @@ impl Immix { mut plan_args: CreateSpecificPlanArgs, space_args: ImmixSpaceArgs, ) -> Self { + let heap_meta = plan_args.global_args.heap; + + let immix_space_spec = heap_meta.specify_space(SpaceSpec::DontCare); + + // Spaces will eventually be placed by `BasePlan`. + let common = CommonPlan::new(plan_args); + let immix = Immix { immix_space: ImmixSpace::new( - plan_args.get_space_args("immix", true, VMRequest::discontiguous()), + plan_args.get_space_args("immix", true, immix_space_spec.unwrap()), space_args, ), - common: CommonPlan::new(plan_args), + common, last_gc_was_defrag: AtomicBool::new(false), }; diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index cd01b86df5..8b00954162 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -15,7 +15,7 @@ use crate::scheduler::gc_work::*; use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::CopySemantics; -use crate::util::heap::VMRequest; +use crate::util::heap::heap_meta::SpaceSpec; use crate::util::metadata::side_metadata::SideMetadataContext; #[cfg(not(feature = "vo_bit"))] use crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC; @@ -192,13 +192,17 @@ impl MarkCompact { global_side_metadata_specs, }; + let heap_meta = args.heap; + + let mc_space_spec = heap_meta.specify_space(SpaceSpec::DontCare); + + // Spaces will eventually be placed by `BasePlan`. 
+ let common = CommonPlan::new(plan_args); + let mc_space = - MarkCompactSpace::new(plan_args.get_space_args("mc", true, VMRequest::discontiguous())); + MarkCompactSpace::new(plan_args.get_space_args("mc", true, mc_space_spec.unwrap())); - let res = MarkCompact { - mc_space, - common: CommonPlan::new(plan_args), - }; + let res = MarkCompact { mc_space, common }; res.verify_side_metadata_sanity(); diff --git a/src/plan/marksweep/global.rs b/src/plan/marksweep/global.rs index 5e127d9d8c..42ae22fed5 100644 --- a/src/plan/marksweep/global.rs +++ b/src/plan/marksweep/global.rs @@ -10,7 +10,7 @@ use crate::plan::PlanConstraints; use crate::policy::space::Space; use crate::scheduler::GCWorkScheduler; use crate::util::alloc::allocators::AllocatorSelector; -use crate::util::heap::VMRequest; +use crate::util::heap::heap_meta::SpaceSpec; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::VMWorkerThread; use crate::vm::VMBinding; @@ -102,13 +102,20 @@ impl MarkSweep { global_side_metadata_specs, }; + let heap_meta = args.heap; + + let ms_spec = heap_meta.specify_space(SpaceSpec::DontCare); + + // Spaces will eventually be placed by `BasePlan`. 
+ let common = CommonPlan::new(plan_args); + let res = MarkSweep { ms: MarkSweepSpace::new(plan_args.get_space_args( "ms", true, - VMRequest::discontiguous(), + ms_spec.unwrap(), )), - common: CommonPlan::new(plan_args), + common, }; res.verify_side_metadata_sanity(); diff --git a/src/plan/nogc/global.rs b/src/plan/nogc/global.rs index 8e013135ed..5cf8795576 100644 --- a/src/plan/nogc/global.rs +++ b/src/plan/nogc/global.rs @@ -9,6 +9,7 @@ use crate::policy::immortalspace::ImmortalSpace; use crate::policy::space::Space; use crate::scheduler::GCWorkScheduler; use crate::util::alloc::allocators::AllocatorSelector; +use crate::util::heap::heap_meta::SpaceSpec; #[allow(unused_imports)] use crate::util::heap::VMRequest; use crate::util::metadata::side_metadata::SideMetadataContext; @@ -88,23 +89,28 @@ impl NoGC { global_side_metadata_specs: SideMetadataContext::new_global_specs(&[]), }; + let heap_meta = args.heap; + + let nogc_space_spec = heap_meta.specify_space(SpaceSpec::DontCare); + let immortal_spec = heap_meta.specify_space(SpaceSpec::DontCare); + let los = heap_meta.specify_space(SpaceSpec::DontCare); + + // Spaces will eventually be placed by `BasePlan`. 
+ let base = BasePlan::new(plan_args); + let res = NoGC { nogc_space: NoGCImmortalSpace::new(plan_args.get_space_args( "nogc_space", cfg!(not(feature = "nogc_no_zeroing")), - VMRequest::discontiguous(), + nogc_space_spec.unwrap(), )), immortal: ImmortalSpace::new(plan_args.get_space_args( "immortal", true, - VMRequest::discontiguous(), - )), - los: ImmortalSpace::new(plan_args.get_space_args( - "los", - true, - VMRequest::discontiguous(), + immortal_spec.unwrap(), )), - base: BasePlan::new(plan_args), + los: ImmortalSpace::new(plan_args.get_space_args("los", true, los.unwrap())), + base, }; res.verify_side_metadata_sanity(); diff --git a/src/plan/pageprotect/global.rs b/src/plan/pageprotect/global.rs index 44e25e2202..0f9316a4c7 100644 --- a/src/plan/pageprotect/global.rs +++ b/src/plan/pageprotect/global.rs @@ -8,7 +8,7 @@ use crate::plan::PlanConstraints; use crate::policy::space::Space; use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; -use crate::util::heap::VMRequest; +use crate::util::heap::heap_meta::SpaceSpec; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::{plan::global::BasePlan, vm::VMBinding}; use crate::{ @@ -97,12 +97,19 @@ impl PageProtect { global_side_metadata_specs: SideMetadataContext::new_global_specs(&[]), }; + let heap_meta = args.heap; + + let space_spec = heap_meta.specify_space(SpaceSpec::DontCare); + + // Spaces will eventually be placed by `BasePlan`. 
+ let common = CommonPlan::new(plan_args); + let ret = PageProtect { space: LargeObjectSpace::new( - plan_args.get_space_args("pageprotect", true, VMRequest::discontiguous()), + plan_args.get_space_args("pageprotect", true, space_spec.unwrap()), true, ), - common: CommonPlan::new(plan_args), + common, }; ret.verify_side_metadata_sanity(); diff --git a/src/plan/semispace/global.rs b/src/plan/semispace/global.rs index eaa7147c78..de12d19d9c 100644 --- a/src/plan/semispace/global.rs +++ b/src/plan/semispace/global.rs @@ -11,7 +11,7 @@ use crate::policy::space::Space; use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::*; -use crate::util::heap::VMRequest; +use crate::util::heap::heap_meta::SpaceSpec; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::opaque_pointer::VMWorkerThread; use crate::{plan::global::BasePlan, vm::VMBinding}; @@ -137,17 +137,25 @@ impl SemiSpace { global_side_metadata_specs: SideMetadataContext::new_global_specs(&[]), }; + let heap_meta = args.heap; + + let copyspace0_spec = heap_meta.specify_space(SpaceSpec::DontCare); + let copyspace1_spec = heap_meta.specify_space(SpaceSpec::DontCare); + + // Spaces will eventually be placed by `BasePlan`. 
+ let common = CommonPlan::new(plan_args); + let res = SemiSpace { hi: AtomicBool::new(false), copyspace0: CopySpace::new( - plan_args.get_space_args("copyspace0", true, VMRequest::discontiguous()), + plan_args.get_space_args("copyspace0", true, copyspace0_spec.unwrap()), false, ), copyspace1: CopySpace::new( - plan_args.get_space_args("copyspace1", true, VMRequest::discontiguous()), + plan_args.get_space_args("copyspace1", true, copyspace0_spec.unwrap()), true, ), - common: CommonPlan::new(plan_args), + common, }; res.verify_side_metadata_sanity(); diff --git a/src/util/heap/heap_meta.rs b/src/util/heap/heap_meta.rs index 4b007283ee..1966f16931 100644 --- a/src/util/heap/heap_meta.rs +++ b/src/util/heap/heap_meta.rs @@ -73,6 +73,18 @@ pub struct SpaceMeta { pub contiguous: bool, } +impl SpaceMeta { + /// Create a dummy SpaceMeta for VMSpace. + pub(crate) fn dummy() -> Self { + Self { + space_id: usize::MAX, + start: Address::ZERO, + extent: 0, + contiguous: false, + } + } +} + /// A space meta that will be provided in the future. 
#[derive(Clone)] pub struct FutureSpaceMeta { From 0f573a42f7e013a35aab88df9dcafd87cb7e12c1 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Fri, 27 Oct 2023 23:27:53 +0800 Subject: [PATCH 06/15] WIP: Further fix plan creation --- src/plan/generational/copying/global.rs | 6 ++---- src/plan/generational/global.rs | 4 +--- src/plan/generational/immix/global.rs | 4 +--- src/plan/global.rs | 28 +++++++++++-------------- src/plan/immix/global.rs | 6 ++---- src/plan/markcompact/global.rs | 6 ++---- src/plan/marksweep/global.rs | 6 ++---- src/plan/nogc/global.rs | 10 ++++----- src/plan/pageprotect/global.rs | 6 ++---- src/plan/semispace/global.rs | 10 ++++----- src/policy/space.rs | 4 ++-- 11 files changed, 34 insertions(+), 56 deletions(-) diff --git a/src/plan/generational/copying/global.rs b/src/plan/generational/copying/global.rs index e01fc6452e..d250fc39fd 100644 --- a/src/plan/generational/copying/global.rs +++ b/src/plan/generational/copying/global.rs @@ -200,10 +200,8 @@ impl GenCopy { crate::plan::generational::new_generational_global_metadata_specs::(), }; - let heap_meta = plan_args.global_args.heap; - - let copyspace0_spec = heap_meta.specify_space(SpaceSpec::DontCare); - let copyspace1_spec = heap_meta.specify_space(SpaceSpec::DontCare); + let copyspace0_spec = plan_args.global_args.heap.specify_space(SpaceSpec::DontCare); + let copyspace1_spec = plan_args.global_args.heap.specify_space(SpaceSpec::DontCare); // Spaces will eventually be placed by `BasePlan`. 
let gen = CommonGenPlan::new(&mut plan_args); diff --git a/src/plan/generational/global.rs b/src/plan/generational/global.rs index 370c9047e2..2de3d55280 100644 --- a/src/plan/generational/global.rs +++ b/src/plan/generational/global.rs @@ -38,9 +38,7 @@ pub struct CommonGenPlan { impl CommonGenPlan { pub fn new(args: &mut CreateSpecificPlanArgs) -> Self { - let heap_meta = args.global_args.heap; - - let nursery_spec = heap_meta.specify_space(SpaceSpec::Extent { + let nursery_spec = args.global_args.heap.specify_space(SpaceSpec::Extent { extent: args.global_args.options.get_max_nursery_bytes(), top: false, }); diff --git a/src/plan/generational/immix/global.rs b/src/plan/generational/immix/global.rs index 4d34299246..24fd7f8e36 100644 --- a/src/plan/generational/immix/global.rs +++ b/src/plan/generational/immix/global.rs @@ -232,9 +232,7 @@ impl GenImmix { crate::plan::generational::new_generational_global_metadata_specs::(), }; - let heap_meta = args.heap; - - let immix_space_spec = heap_meta.specify_space(SpaceSpec::DontCare); + let immix_space_spec = plan_args.global_args.heap.specify_space(SpaceSpec::DontCare); // Spaces will eventually be placed by `BasePlan`. 
let gen = CommonGenPlan::new(&mut plan_args); diff --git a/src/plan/global.rs b/src/plan/global.rs index a35c44aa78..1b09c05533 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -385,19 +385,17 @@ impl<'a, VM: VMBinding> CreateSpecificPlanArgs<'a, VM> { impl BasePlan { #[allow(unused_mut)] // 'args' only needs to be mutable for certain features - pub fn new(mut args: CreateSpecificPlanArgs) -> BasePlan { - let heap_meta = args.global_args.heap; - + pub fn new(args: &mut CreateSpecificPlanArgs) -> BasePlan { #[cfg(feature = "code_space")] - let code_space_spec = heap_meta.specify_space(SpaceSpec::DontCare); + let code_space_spec = args.global_args.heap.specify_space(SpaceSpec::DontCare); #[cfg(feature = "code_space")] - let code_lo_space_spec = heap_meta.specify_space(SpaceSpec::DontCare); + let code_lo_space_spec = args.global_args.heap.specify_space(SpaceSpec::DontCare); #[cfg(feature = "ro_space")] - let ro_space_spec = heap_meta.specify_space(SpaceSpec::DontCare); + let ro_space_spec = args.global_args.heap.specify_space(SpaceSpec::DontCare); // NOTE: We don't specify VM space because it doesn't use SpaceMeta anyway. // BasePlan does not have any nested structs with spaces. We now place spaces. 
- heap_meta.place_spaces(); + args.global_args.heap.place_spaces(); BasePlan { #[cfg(feature = "code_space")] @@ -426,8 +424,8 @@ impl BasePlan { )), global_state: args.global_args.state.clone(), - gc_trigger: args.global_args.gc_trigger, - options: args.global_args.options, + gc_trigger: args.global_args.gc_trigger.clone(), + options: args.global_args.options.clone(), } } @@ -561,12 +559,10 @@ pub struct CommonPlan { } impl CommonPlan { - pub fn new(mut args: CreateSpecificPlanArgs) -> CommonPlan { - let heap_meta = args.global_args.heap; - - let immortal_spec = heap_meta.specify_space(SpaceSpec::DontCare); - let los_spec = heap_meta.specify_space(SpaceSpec::DontCare); - let nonmoving_spec = heap_meta.specify_space(SpaceSpec::DontCare); + pub fn new(args: &mut CreateSpecificPlanArgs) -> CommonPlan { + let immortal_spec = args.global_args.heap.specify_space(SpaceSpec::DontCare); + let los_spec = args.global_args.heap.specify_space(SpaceSpec::DontCare); + let nonmoving_spec = args.global_args.heap.specify_space(SpaceSpec::DontCare); let base = BasePlan::new(args); @@ -582,7 +578,7 @@ impl CommonPlan { true, nonmoving_spec.unwrap(), )), - base: BasePlan::new(args), + base, } } diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs index 80d6fea1ed..e0ed4ba467 100644 --- a/src/plan/immix/global.rs +++ b/src/plan/immix/global.rs @@ -140,12 +140,10 @@ impl Immix { mut plan_args: CreateSpecificPlanArgs, space_args: ImmixSpaceArgs, ) -> Self { - let heap_meta = plan_args.global_args.heap; - - let immix_space_spec = heap_meta.specify_space(SpaceSpec::DontCare); + let immix_space_spec = plan_args.global_args.heap.specify_space(SpaceSpec::DontCare); // Spaces will eventually be placed by `BasePlan`. 
- let common = CommonPlan::new(plan_args); + let common = CommonPlan::new(&mut plan_args); let immix = Immix { immix_space: ImmixSpace::new( diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index 8b00954162..390c1d0ad0 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -192,12 +192,10 @@ impl MarkCompact { global_side_metadata_specs, }; - let heap_meta = args.heap; - - let mc_space_spec = heap_meta.specify_space(SpaceSpec::DontCare); + let mc_space_spec = plan_args.global_args.heap.specify_space(SpaceSpec::DontCare); // Spaces will eventually be placed by `BasePlan`. - let common = CommonPlan::new(plan_args); + let common = CommonPlan::new(&mut plan_args); let mc_space = MarkCompactSpace::new(plan_args.get_space_args("mc", true, mc_space_spec.unwrap())); diff --git a/src/plan/marksweep/global.rs b/src/plan/marksweep/global.rs index 42ae22fed5..31781db073 100644 --- a/src/plan/marksweep/global.rs +++ b/src/plan/marksweep/global.rs @@ -102,12 +102,10 @@ impl MarkSweep { global_side_metadata_specs, }; - let heap_meta = args.heap; - - let ms_spec = heap_meta.specify_space(SpaceSpec::DontCare); + let ms_spec = plan_args.global_args.heap.specify_space(SpaceSpec::DontCare); // Spaces will eventually be placed by `BasePlan`. 
- let common = CommonPlan::new(plan_args); + let common = CommonPlan::new(&mut plan_args); let res = MarkSweep { ms: MarkSweepSpace::new(plan_args.get_space_args( diff --git a/src/plan/nogc/global.rs b/src/plan/nogc/global.rs index 5cf8795576..e59f0a872c 100644 --- a/src/plan/nogc/global.rs +++ b/src/plan/nogc/global.rs @@ -89,14 +89,12 @@ impl NoGC { global_side_metadata_specs: SideMetadataContext::new_global_specs(&[]), }; - let heap_meta = args.heap; - - let nogc_space_spec = heap_meta.specify_space(SpaceSpec::DontCare); - let immortal_spec = heap_meta.specify_space(SpaceSpec::DontCare); - let los = heap_meta.specify_space(SpaceSpec::DontCare); + let nogc_space_spec = plan_args.global_args.heap.specify_space(SpaceSpec::DontCare); + let immortal_spec = plan_args.global_args.heap.specify_space(SpaceSpec::DontCare); + let los = plan_args.global_args.heap.specify_space(SpaceSpec::DontCare); // Spaces will eventually be placed by `BasePlan`. - let base = BasePlan::new(plan_args); + let base = BasePlan::new(&mut plan_args); let res = NoGC { nogc_space: NoGCImmortalSpace::new(plan_args.get_space_args( diff --git a/src/plan/pageprotect/global.rs b/src/plan/pageprotect/global.rs index 0f9316a4c7..b919a0f23e 100644 --- a/src/plan/pageprotect/global.rs +++ b/src/plan/pageprotect/global.rs @@ -97,12 +97,10 @@ impl PageProtect { global_side_metadata_specs: SideMetadataContext::new_global_specs(&[]), }; - let heap_meta = args.heap; - - let space_spec = heap_meta.specify_space(SpaceSpec::DontCare); + let space_spec = plan_args.global_args.heap.specify_space(SpaceSpec::DontCare); // Spaces will eventually be placed by `BasePlan`. 
- let common = CommonPlan::new(plan_args); + let common = CommonPlan::new(&mut plan_args); let ret = PageProtect { space: LargeObjectSpace::new( diff --git a/src/plan/semispace/global.rs b/src/plan/semispace/global.rs index de12d19d9c..37584d9062 100644 --- a/src/plan/semispace/global.rs +++ b/src/plan/semispace/global.rs @@ -137,13 +137,11 @@ impl SemiSpace { global_side_metadata_specs: SideMetadataContext::new_global_specs(&[]), }; - let heap_meta = args.heap; - - let copyspace0_spec = heap_meta.specify_space(SpaceSpec::DontCare); - let copyspace1_spec = heap_meta.specify_space(SpaceSpec::DontCare); + let copyspace0_spec = plan_args.global_args.heap.specify_space(SpaceSpec::DontCare); + let copyspace1_spec = plan_args.global_args.heap.specify_space(SpaceSpec::DontCare); // Spaces will eventually be placed by `BasePlan`. - let common = CommonPlan::new(plan_args); + let common = CommonPlan::new(&mut plan_args); let res = SemiSpace { hi: AtomicBool::new(false), @@ -152,7 +150,7 @@ impl SemiSpace { false, ), copyspace1: CopySpace::new( - plan_args.get_space_args("copyspace1", true, copyspace0_spec.unwrap()), + plan_args.get_space_args("copyspace1", true, copyspace1_spec.unwrap()), true, ), common, diff --git a/src/policy/space.rs b/src/policy/space.rs index 6a988566b9..1bdd080a5b 100644 --- a/src/policy/space.rs +++ b/src/policy/space.rs @@ -465,7 +465,7 @@ impl CommonSpace { pub fn new(args: PolicyCreateSpaceArgs) -> Self { let space_meta = args.plan_args.space_meta; let SpaceMeta { - space_id, + space_id: _space_id, // TODO: Let SpaceDescriptor use this space_id start, extent, contiguous, @@ -473,7 +473,7 @@ impl CommonSpace { let descriptor = SpaceDescriptor::create_descriptor_from_heap_range(start, start + extent); - let mut rtn = CommonSpace { + let rtn = CommonSpace { name: args.plan_args.name, descriptor, space_meta, From e45d515cdab77b434e1c9814ceab34b2945bf723 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Mon, 30 Oct 2023 11:20:32 +0800 Subject: [PATCH 
07/15] Fix FreeListPageResource creation Since we compute the range of all spaces before creating them, we know the start of the FreeListPageResource when they are created. --- src/mmtk.rs | 8 ++++--- src/policy/immix/immixspace.rs | 2 ++ src/policy/largeobjectspace.rs | 2 +- src/policy/marksweepspace/native_ms/global.rs | 2 +- src/util/heap/blockpageresource.rs | 4 +++- src/util/heap/freelistpageresource.rs | 3 +-- src/util/heap/heap_meta.rs | 16 ++++++++++--- src/util/heap/layout/map32.rs | 24 ++----------------- 8 files changed, 28 insertions(+), 33 deletions(-) diff --git a/src/mmtk.rs b/src/mmtk.rs index 46693df401..c9aebc267b 100644 --- a/src/mmtk.rs +++ b/src/mmtk.rs @@ -15,7 +15,7 @@ use crate::util::heap::gc_trigger::GCTrigger; use crate::util::heap::heap_meta::HeapMeta; use crate::util::heap::layout::vm_layout::VMLayout; use crate::util::heap::layout::{self, Mmapper, VMMap}; -use crate::util::opaque_pointer::*; +use crate::util::{opaque_pointer::*, Address}; use crate::util::options::Options; use crate::util::reference_processor::ReferenceProcessors; #[cfg(feature = "sanity")] @@ -188,8 +188,10 @@ impl MMTK { // TODO: This probably does not work if we have multiple MMTk instances. VM_MAP.boot(); - // This needs to be called after we create Plan. It needs to use HeapMeta, which is gradually built when we create spaces. - //VM_MAP.finalize_static_space_map(heap.get_discontig_start(), heap.get_discontig_end()); + let (discontig_start, discontig_end) = heap.get_discontiguous_range().unwrap_or((Address::ZERO, Address::ZERO)); + // Map32 calls this to initialize the global freelists, which is reasonable. + // Map64 calls this to fix the starting addresses of RawMemoryFreeList instances, which is a bug and should be fixed. 
+ VM_MAP.finalize_static_space_map(discontig_start, discontig_end); if *options.transparent_hugepages { MMAPPER.set_mmap_strategy(crate::util::memory::MmapStrategy::TransparentHugePages); diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index ef1e7fee2c..fc02fcda36 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -291,6 +291,8 @@ impl ImmixSpace { pr: if !common.space_meta.contiguous { BlockPageResource::new_discontiguous( Block::LOG_PAGES, + common.start, + common.extent, vm_map, scheduler.num_workers(), ) diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index 15307b6743..a53ee88d31 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -151,7 +151,7 @@ impl LargeObjectSpace { metadata::extract_side_metadata(&[*VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC]), )); let mut pr = if is_discontiguous { - FreeListPageResource::new_discontiguous(vm_map) + FreeListPageResource::new_discontiguous(common.start, common.extent, vm_map) } else { FreeListPageResource::new_contiguous(common.start, common.extent, vm_map) }; diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs index fdc820911f..d2a6e54d35 100644 --- a/src/policy/marksweepspace/native_ms/global.rs +++ b/src/policy/marksweepspace/native_ms/global.rs @@ -221,7 +221,7 @@ impl MarkSweepSpace { let common = CommonSpace::new(args.into_policy_args(false, false, local_specs)); MarkSweepSpace { pr: if is_discontiguous { - FreeListPageResource::new_discontiguous(vm_map) + FreeListPageResource::new_discontiguous(common.start, common.extent, vm_map) } else { FreeListPageResource::new_contiguous(common.start, common.extent, vm_map) }, diff --git a/src/util/heap/blockpageresource.rs b/src/util/heap/blockpageresource.rs index 5b3ffbc2f2..b501dc046e 100644 --- a/src/util/heap/blockpageresource.rs +++ b/src/util/heap/blockpageresource.rs @@ -73,12 +73,14 @@ 
impl BlockPageResource { pub fn new_discontiguous( log_pages: usize, + start: Address, + bytes: usize, vm_map: &'static dyn VMMap, num_workers: usize, ) -> Self { assert!((1 << log_pages) <= PAGES_IN_CHUNK); Self { - flpr: FreeListPageResource::new_discontiguous(vm_map), + flpr: FreeListPageResource::new_discontiguous(start, bytes, vm_map), block_queue: BlockPool::new(num_workers), sync: Mutex::new(()), } diff --git a/src/util/heap/freelistpageresource.rs b/src/util/heap/freelistpageresource.rs index 756ccfda03..a0adba4750 100644 --- a/src/util/heap/freelistpageresource.rs +++ b/src/util/heap/freelistpageresource.rs @@ -209,9 +209,8 @@ impl FreeListPageResource { } } - pub fn new_discontiguous(vm_map: &'static dyn VMMap) -> Self { + pub fn new_discontiguous(start: Address, _bytes: usize, vm_map: &'static dyn VMMap) -> Self { let common_flpr = { - let start = vm_layout().available_start(); let common_flpr = Box::new(CommonFreeListPageResource { free_list: vm_map.create_freelist(start), start, diff --git a/src/util/heap/heap_meta.rs b/src/util/heap/heap_meta.rs index 1966f16931..e3379589bc 100644 --- a/src/util/heap/heap_meta.rs +++ b/src/util/heap/heap_meta.rs @@ -11,8 +11,9 @@ use crate::util::Address; /// /// TODO: This type needs a better name. 
pub struct HeapMeta { - pub heap_start: Address, - pub heap_limit: Address, + heap_start: Address, + heap_limit: Address, + discontiguous_range: Option<(Address, Address)>, entries: Vec, } @@ -120,6 +121,7 @@ impl HeapMeta { heap_start: vm_layout().heap_start, heap_limit: vm_layout().heap_end, entries: Vec::default(), + discontiguous_range: None, } } @@ -193,7 +195,11 @@ impl HeapMeta { entry.promise_meta.provide(meta); } - let (discontig_start, discontig_end) = reserver.remaining_range(); + let discontig_range = reserver.remaining_range(); + self.discontiguous_range = Some(discontig_range); + + let (discontig_start, discontig_end) = discontig_range; + let discontig_extent = discontig_end - discontig_start; for (i, entry) in self.entries.iter_mut().enumerate() { if !entry.spec.dont_care() { @@ -211,6 +217,10 @@ impl HeapMeta { } } } + + pub fn get_discontiguous_range(&self) -> Option<(Address, Address)> { + self.discontiguous_range + } } // make clippy happy diff --git a/src/util/heap/layout/map32.rs b/src/util/heap/layout/map32.rs index c4aa08f52f..e3eae2b8f6 100644 --- a/src/util/heap/layout/map32.rs +++ b/src/util/heap/layout/map32.rs @@ -9,7 +9,6 @@ use crate::util::heap::space_descriptor::SpaceDescriptor; use crate::util::int_array_freelist::IntArrayFreeList; use crate::util::Address; use std::cell::UnsafeCell; -use std::ptr::NonNull; use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::{Mutex, MutexGuard}; @@ -25,7 +24,6 @@ pub struct Map32Inner { region_map: IntArrayFreeList, global_page_map: IntArrayFreeList, shared_discontig_fl_count: usize, - shared_fl_map: Vec>>, total_available_discontiguous_chunks: usize, finalized: bool, descriptor_map: Vec, @@ -50,7 +48,6 @@ impl Map32 { region_map: IntArrayFreeList::new(max_chunks, max_chunks as _, 1), global_page_map: IntArrayFreeList::new(1, 1, MAX_SPACES), shared_discontig_fl_count: 0, - shared_fl_map: vec![None; MAX_SPACES], total_available_discontiguous_chunks: 0, finalized: false, descriptor_map: 
vec![SpaceDescriptor::UNINITIALIZED; max_chunks], @@ -107,14 +104,7 @@ impl VMMap for Map32 { Box::new(IntArrayFreeList::new(units, grain, 1)) } - unsafe fn bind_freelist(&self, pr: *const CommonFreeListPageResource) { - let ordinal: usize = (*pr) - .free_list - .downcast_ref::() - .unwrap() - .get_ordinal() as usize; - let self_mut: &mut Map32Inner = self.mut_self(); - self_mut.shared_fl_map[ordinal] = Some(NonNull::new_unchecked(pr as *mut _)); + unsafe fn bind_freelist(&self, _pr: *const CommonFreeListPageResource) { } unsafe fn allocate_contiguous_chunks( @@ -211,17 +201,7 @@ impl VMMap for Map32 { // start_address=0xb0000000, first_chunk=704, last_chunk=703, unavail_start_chunk=704, trailing_chunks=320, pages=0 // startAddress=0x68000000 firstChunk=416 lastChunk=703 unavailStartChunk=704 trailingChunks=320 pages=294912 self_mut.global_page_map.resize_freelist(pages, pages as _); - // TODO: Clippy favors using iter().flatten() rather than iter() with if-let. - // https://rust-lang.github.io/rust-clippy/master/index.html#manual_flatten - // Yi: I am not doing this refactoring right now, as I am not familiar with flatten() and - // there is no test to ensure the refactoring will be correct. - #[allow(clippy::manual_flatten)] - for fl in self_mut.shared_fl_map.iter().copied() { - if let Some(mut fl) = fl { - let fl_mut = unsafe { fl.as_mut() }; - fl_mut.resize_freelist(start_address); - } - } + // [ // 2: -1073741825 // 3: -1073741825 From 848bb972764484ec978366995ee9bbfa15be3482 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Mon, 30 Oct 2023 13:51:14 +0800 Subject: [PATCH 08/15] Misc fixes Now it seems working. 
--- src/mmtk.rs | 2 +- src/policy/immix/immixspace.rs | 1 - src/policy/largeobjectspace.rs | 2 +- src/policy/marksweepspace/native_ms/global.rs | 2 +- src/policy/space.rs | 70 ++++++++++++------- src/util/heap/blockpageresource.rs | 3 +- src/util/heap/freelistpageresource.rs | 9 ++- src/util/heap/heap_meta.rs | 10 +++ 8 files changed, 68 insertions(+), 31 deletions(-) diff --git a/src/mmtk.rs b/src/mmtk.rs index c9aebc267b..070e6af535 100644 --- a/src/mmtk.rs +++ b/src/mmtk.rs @@ -191,7 +191,7 @@ impl MMTK { let (discontig_start, discontig_end) = heap.get_discontiguous_range().unwrap_or((Address::ZERO, Address::ZERO)); // Map32 calls this to initialize the global freelists, which is reasonable. // Map64 calls this to fix the starting addresses of RawMemoryFreeList instances, which is a bug and should be fixed. - VM_MAP.finalize_static_space_map(discontig_start, discontig_end); + VM_MAP.finalize_static_space_map(discontig_start, discontig_end - 1); if *options.transparent_hugepages { MMAPPER.set_mmap_strategy(crate::util::memory::MmapStrategy::TransparentHugePages); diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs index fc02fcda36..bce10bb6d9 100644 --- a/src/policy/immix/immixspace.rs +++ b/src/policy/immix/immixspace.rs @@ -292,7 +292,6 @@ impl ImmixSpace { BlockPageResource::new_discontiguous( Block::LOG_PAGES, common.start, - common.extent, vm_map, scheduler.num_workers(), ) diff --git a/src/policy/largeobjectspace.rs b/src/policy/largeobjectspace.rs index a53ee88d31..150713cc52 100644 --- a/src/policy/largeobjectspace.rs +++ b/src/policy/largeobjectspace.rs @@ -151,7 +151,7 @@ impl LargeObjectSpace { metadata::extract_side_metadata(&[*VM::VMObjectModel::LOCAL_LOS_MARK_NURSERY_SPEC]), )); let mut pr = if is_discontiguous { - FreeListPageResource::new_discontiguous(common.start, common.extent, vm_map) + FreeListPageResource::new_discontiguous(common.start, vm_map) } else { FreeListPageResource::new_contiguous(common.start, 
common.extent, vm_map) }; diff --git a/src/policy/marksweepspace/native_ms/global.rs b/src/policy/marksweepspace/native_ms/global.rs index d2a6e54d35..f35c22e040 100644 --- a/src/policy/marksweepspace/native_ms/global.rs +++ b/src/policy/marksweepspace/native_ms/global.rs @@ -221,7 +221,7 @@ impl MarkSweepSpace { let common = CommonSpace::new(args.into_policy_args(false, false, local_specs)); MarkSweepSpace { pr: if is_discontiguous { - FreeListPageResource::new_discontiguous(common.start, common.extent, vm_map) + FreeListPageResource::new_discontiguous(common.start, vm_map) } else { FreeListPageResource::new_contiguous(common.start, common.extent, vm_map) }, diff --git a/src/policy/space.rs b/src/policy/space.rs index 1bdd080a5b..9586300c91 100644 --- a/src/policy/space.rs +++ b/src/policy/space.rs @@ -392,7 +392,7 @@ pub struct CommonSpace { pub space_meta: SpaceMeta, /// For a copying space that allows sft_trace_object(), this should be set before each GC so we know - // the copy semantics for the space. + /// the copy semantics for the space. pub copy: Option, immortal: bool, @@ -400,7 +400,15 @@ pub struct CommonSpace { pub contiguous: bool, pub zeroed: bool, + /// The lower bound of the address range of the space. + /// - If this space is contiguous, this space owns the address range + /// `start <= addr < start + extent`. + /// - If discontiguous, this space shares the address range `start <= addr < start + extent` + /// with other discontiguous spaces. This space only owns individual chunks in this range + /// managed by the `VMMap`. pub start: Address, + + /// The length of the address range of the space. See `start`. 
pub extent: usize, pub vm_map: &'static dyn VMMap, @@ -471,7 +479,13 @@ impl CommonSpace { contiguous, } = space_meta; - let descriptor = SpaceDescriptor::create_descriptor_from_heap_range(start, start + extent); + let descriptor = if contiguous { + SpaceDescriptor::create_descriptor_from_heap_range(start, start + extent) + } else { + // TODO: `create_descriptor` simply allocates the next "space index". + // We should let it use `space_id` instead. + SpaceDescriptor::create_descriptor() + }; let rtn = CommonSpace { name: args.plan_args.name, @@ -497,30 +511,38 @@ impl CommonSpace { p: PhantomData, }; - // We only initialize our vm map if the range of the space is in our available heap range. For normally spaces, - // they are definitely in our heap range. But for VM space, a runtime could give us an arbitrary range. We only - // insert into our vm map if the range overlaps with our heap. - { - use crate::util::heap::layout; - let overlap = - Address::range_intersection(&(start..start + extent), &layout::available_range()); - if !overlap.is_empty() { - args.plan_args.vm_map.insert( - overlap.start, - overlap.end - overlap.start, - rtn.descriptor, - ); + if contiguous { + // If the space is contiguous, it implies that the address range + // `start <= addr < start + extent` is solely owned by one space. + // We can eagerly insert `SpaceDescriptor` entries and map metadata. + // If the space is discontiguous, we do this lazily when we allocate chunks from the + // global free list. + + // We only initialize our vm map if the range of the space is in our available heap range. For normally spaces, + // they are definitely in our heap range. But for VM space, a runtime could give us an arbitrary range. We only + // insert into our vm map if the range overlaps with our heap. 
+ { + use crate::util::heap::layout; + let overlap = + Address::range_intersection(&(start..start + extent), &layout::available_range()); + if !overlap.is_empty() { + args.plan_args.vm_map.insert( + overlap.start, + overlap.end - overlap.start, + rtn.descriptor, + ); + } } - } - // For contiguous space, we know its address range so we reserve metadata memory for its range. - if rtn - .metadata - .try_map_metadata_address_range(rtn.start, rtn.extent) - .is_err() - { - // TODO(Javad): handle meta space allocation failure - panic!("failed to mmap meta memory"); + // For contiguous space, we know its address range so we reserve metadata memory for its range. + if rtn + .metadata + .try_map_metadata_address_range(rtn.start, rtn.extent) + .is_err() + { + // TODO(Javad): handle meta space allocation failure + panic!("failed to mmap meta memory"); + } } debug!( diff --git a/src/util/heap/blockpageresource.rs b/src/util/heap/blockpageresource.rs index b501dc046e..5833922eda 100644 --- a/src/util/heap/blockpageresource.rs +++ b/src/util/heap/blockpageresource.rs @@ -74,13 +74,12 @@ impl BlockPageResource { pub fn new_discontiguous( log_pages: usize, start: Address, - bytes: usize, vm_map: &'static dyn VMMap, num_workers: usize, ) -> Self { assert!((1 << log_pages) <= PAGES_IN_CHUNK); Self { - flpr: FreeListPageResource::new_discontiguous(start, bytes, vm_map), + flpr: FreeListPageResource::new_discontiguous(start, vm_map), block_queue: BlockPool::new(num_workers), sync: Mutex::new(()), } diff --git a/src/util/heap/freelistpageresource.rs b/src/util/heap/freelistpageresource.rs index a0adba4750..864503b126 100644 --- a/src/util/heap/freelistpageresource.rs +++ b/src/util/heap/freelistpageresource.rs @@ -179,6 +179,9 @@ impl PageResource for FreeListPageResource { } impl FreeListPageResource { + /// Create a contiguous free list page resource. + /// + /// The page resource will span over the address range from `start` to `start + bytes`. 
pub fn new_contiguous(start: Address, bytes: usize, vm_map: &'static dyn VMMap) -> Self { let pages = conversions::bytes_to_pages(bytes); let common_flpr = { @@ -209,7 +212,11 @@ impl FreeListPageResource { } } - pub fn new_discontiguous(start: Address, _bytes: usize, vm_map: &'static dyn VMMap) -> Self { + /// Create a discontiguous free list page resource. + /// + /// `start` will be used as the base address for computing chunk addresses from free list + /// indices. We don't need to compute the extent here. + pub fn new_discontiguous(start: Address, vm_map: &'static dyn VMMap) -> Self { let common_flpr = { let common_flpr = Box::new(CommonFreeListPageResource { free_list: vm_map.create_freelist(start), diff --git a/src/util/heap/heap_meta.rs b/src/util/heap/heap_meta.rs index e3379589bc..8422339d66 100644 --- a/src/util/heap/heap_meta.rs +++ b/src/util/heap/heap_meta.rs @@ -67,6 +67,7 @@ impl SpaceSpec { } /// This struct represents the placement decision of a space. +#[derive(Debug)] pub struct SpaceMeta { pub space_id: usize, pub start: Address, @@ -141,6 +142,7 @@ impl HeapMeta { let mut reserver = AddressRangeReserver::new(self.heap_start, self.heap_limit); if force_use_contiguous_spaces { + debug!("Placing spaces in a generous address space"); let extent = vm_layout().max_space_extent(); for (i, entry) in self.entries.iter_mut().enumerate() { @@ -154,9 +156,11 @@ impl HeapMeta { contiguous: true, }; + debug!(" SpaceMeta: {:?}", meta); entry.promise_meta.provide(meta); } } else { + debug!("Placing spaces in a confined address space"); for (i, entry) in self.entries.iter_mut().enumerate() { let (start, extent) = match entry.spec { SpaceSpec::DontCare => continue, @@ -192,6 +196,7 @@ impl HeapMeta { contiguous: true, }; + debug!(" SpaceMeta: {:?}", meta); entry.promise_meta.provide(meta); } @@ -200,6 +205,8 @@ impl HeapMeta { let (discontig_start, discontig_end) = discontig_range; + debug!("Discontiguous range is [{}, {})", discontig_start, discontig_end); + 
let discontig_extent = discontig_end - discontig_start; for (i, entry) in self.entries.iter_mut().enumerate() { if !entry.spec.dont_care() { @@ -213,9 +220,12 @@ impl HeapMeta { contiguous: false, }; + debug!(" SpaceMeta: {:?}", meta); entry.promise_meta.provide(meta); } } + + debug!("Space placement finished."); } pub fn get_discontiguous_range(&self) -> Option<(Address, Address)> { From edbcc91e195e092d98f67ea5fe2b82f2188059e3 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Mon, 30 Oct 2023 15:07:51 +0800 Subject: [PATCH 09/15] Docs and minor fixes --- src/mmtk.rs | 14 ++++--- src/plan/global.rs | 2 +- src/util/heap/heap_meta.rs | 74 ++++++++++++++++++++++++++++------- src/util/heap/layout/map32.rs | 3 +- 4 files changed, 71 insertions(+), 22 deletions(-) diff --git a/src/mmtk.rs b/src/mmtk.rs index 070e6af535..0c27435455 100644 --- a/src/mmtk.rs +++ b/src/mmtk.rs @@ -15,12 +15,12 @@ use crate::util::heap::gc_trigger::GCTrigger; use crate::util::heap::heap_meta::HeapMeta; use crate::util::heap::layout::vm_layout::VMLayout; use crate::util::heap::layout::{self, Mmapper, VMMap}; -use crate::util::{opaque_pointer::*, Address}; use crate::util::options::Options; use crate::util::reference_processor::ReferenceProcessors; #[cfg(feature = "sanity")] use crate::util::sanity::sanity_checker::SanityChecker; use crate::util::statistics::stats::Stats; +use crate::util::{opaque_pointer::*, Address}; use crate::vm::ReferenceGlue; use crate::vm::VMBinding; use std::cell::UnsafeCell; @@ -188,10 +188,14 @@ impl MMTK { // TODO: This probably does not work if we have multiple MMTk instances. VM_MAP.boot(); - let (discontig_start, discontig_end) = heap.get_discontiguous_range().unwrap_or((Address::ZERO, Address::ZERO)); - // Map32 calls this to initialize the global freelists, which is reasonable. - // Map64 calls this to fix the starting addresses of RawMemoryFreeList instances, which is a bug and should be fixed. 
- VM_MAP.finalize_static_space_map(discontig_start, discontig_end - 1); + + // `Map32` uses `finalize_static_space_map` this to initialize the global freelists, which is reasonable. + // `Map64` uses `finalize_static_space_map` this to fix the starting addresses of `RawMemoryFreeList` instances, which is a bug and should be fixed. + // Since `Map64` doesn't read the start and end of the discontiguous range in the function at all, we can leave them as zeroes. + let discontig_range = heap + .get_discontiguous_range() + .unwrap_or(Address::ZERO..Address::ZERO); + VM_MAP.finalize_static_space_map(discontig_range.start, discontig_range.end); if *options.transparent_hugepages { MMAPPER.set_mmap_strategy(crate::util::memory::MmapStrategy::TransparentHugePages); diff --git a/src/plan/global.rs b/src/plan/global.rs index 1b09c05533..9bede9d9a0 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -420,7 +420,7 @@ impl BasePlan { vm_space: VMSpace::new(args.get_space_args( "vm_space", false, - SpaceMeta::dummy(), + SpaceMeta::vm_space_dummy(), )), global_state: args.global_args.state.clone(), diff --git a/src/util/heap/heap_meta.rs b/src/util/heap/heap_meta.rs index 8422339d66..2f69bd50b2 100644 --- a/src/util/heap/heap_meta.rs +++ b/src/util/heap/heap_meta.rs @@ -1,4 +1,30 @@ +//! This module determines the address ranges of spaces of a plan according to the specifications +//! given by the plan. +//! +//! [`HeapMeta`] is the helper type for space placement, and is a prerequisite of creating plans. +//! It is used as following. +//! +//! 1. A plan declares all the spaces it wants to create using the `specify_space` method. For +//! each space, it passes a [`SpaceSpec`] which specifies the requirements for each space, +//! including whether the space is contiguous, whether it has a fixed extent, and whether it +//! should be place at the low end or high end of the heap range, etc. The `specify_space` +//! 
method returns a [`FutureSpaceMeta`] for each space which can be used later. +//! 2. After all spaces are specified, the plan calls the `place_spaces` method. It determines +//! the locations (starts and extends) and contiguousness of all spaces according to the policy +//! specified by [`crate::util::heap::layout::vm_layout::vm_layout`]. +//! 3. Then the plan calls `unwrap()` on each [`FutureSpaceMeta`] to get a [`SpaceMeta`] which +//! holds the the placement decision for each space (start, extent, contiguousness, etc.). +//! Using such information, the space can create each concrete spaces. +//! +//! In summary, the plan specifies all spaces before `HeapMeta` makes placement decision, and all +//! spaces know their locations the moment they are created. +//! +//! By doing so, we can avoid creating spaces first and then computing their start addresses and +//! mutate those spaces. JikesRVM's MMTk used to do that, but such practice is unfriendly to Rust +//! which has strict ownership and mutability rules. + use std::cell::RefCell; +use std::ops::Range; use std::rc::Rc; use crate::util::constants::LOG_BYTES_IN_MBYTE; @@ -8,15 +34,17 @@ use crate::util::heap::vm_layout::BYTES_IN_CHUNK; use crate::util::Address; /// This struct is used to determine the placement of each space during the creation of a Plan. +/// Read the module-level documentation for how to use. /// /// TODO: This type needs a better name. pub struct HeapMeta { heap_start: Address, heap_limit: Address, - discontiguous_range: Option<(Address, Address)>, + discontiguous_range: Option>, entries: Vec, } +/// A space specification and a "promise" for sending `SpaceMeta` to the user (plan). struct SpaceEntry { spec: SpaceSpec, promise_meta: PromiseSpaceMeta, @@ -29,8 +57,10 @@ struct SpaceEntry { /// the space placement strategy may give each space a contiguous 2TiB address space even if it /// requests a small extent. pub enum SpaceSpec { - /// There is no size or place requirement for the space. 
The space may be given a very large - /// contiguous or discontiguous space range of address, depending on the strategy. + /// There is no size, location, or contiguousness requirement for the space. In a confined + /// address space, the space may be given a discontiguous address range shared with other + /// spaces; in a generous address space, the space may be given a very large contiguous address + /// range solely owned by this space. DontCare, /// Require a contiguous range of address of a fixed size. Extent { @@ -69,15 +99,21 @@ impl SpaceSpec { /// This struct represents the placement decision of a space. #[derive(Debug)] pub struct SpaceMeta { + /// An assigned ID of the space. Guaranteed to be unique. pub space_id: usize, + /// The start of the address range of the space. For discontiguous spaces, this range will be + /// shared with other discontiguous spaces. pub start: Address, + /// The extent of the address range of the space. pub extent: usize, + /// `true` if the space is contiguous. pub contiguous: bool, } impl SpaceMeta { - /// Create a dummy SpaceMeta for VMSpace. - pub(crate) fn dummy() -> Self { + /// Create a dummy `SpaceMeta for `VMSpace` because the address range of `VMSpace` is not + /// determined by `HeapMeta`. + pub(crate) fn vm_space_dummy() -> Self { Self { space_id: usize::MAX, start: Address::ZERO, @@ -87,7 +123,7 @@ impl SpaceMeta { } } -/// A space meta that will be provided in the future. +/// A `SpaceMeta` that will be provided in the future. #[derive(Clone)] pub struct FutureSpaceMeta { inner: Rc>>, @@ -103,7 +139,7 @@ impl FutureSpaceMeta { } } -/// The struct for HeapMeta to provide a SpaceMeta instance for its user. +/// The struct for `HeapMeta` to provide a `SpaceMeta` instance for its user. struct PromiseSpaceMeta { inner: Rc>>, } @@ -126,6 +162,7 @@ impl HeapMeta { } } + /// Declare a space and specify the detailed requirements. 
pub fn specify_space(&mut self, spec: SpaceSpec) -> FutureSpaceMeta { let shared_meta = Rc::new(RefCell::new(None)); let future_meta = FutureSpaceMeta { @@ -136,6 +173,7 @@ impl HeapMeta { future_meta } + /// Determine the locations of all specified spaces. pub fn place_spaces(&mut self) { let force_use_contiguous_spaces = vm_layout().force_use_contiguous_spaces; @@ -201,11 +239,16 @@ impl HeapMeta { } let discontig_range = reserver.remaining_range(); - self.discontiguous_range = Some(discontig_range); - - let (discontig_start, discontig_end) = discontig_range; + self.discontiguous_range = Some(discontig_range.clone()); + let Range { + start: discontig_start, + end: discontig_end, + } = discontig_range; - debug!("Discontiguous range is [{}, {})", discontig_start, discontig_end); + debug!( + "Discontiguous range is [{}, {})", + discontig_start, discontig_end + ); let discontig_extent = discontig_end - discontig_start; for (i, entry) in self.entries.iter_mut().enumerate() { @@ -228,8 +271,9 @@ impl HeapMeta { debug!("Space placement finished."); } - pub fn get_discontiguous_range(&self) -> Option<(Address, Address)> { - self.discontiguous_range + /// Get the shared address range for discontigous spaces. + pub fn get_discontiguous_range(&self) -> Option> { + self.discontiguous_range.clone() } } @@ -278,7 +322,7 @@ impl AddressRangeReserver { ret } - pub fn remaining_range(&self) -> (Address, Address) { - (self.lower_bound, self.upper_bound) + pub fn remaining_range(&self) -> Range
{ + self.lower_bound..self.upper_bound } } diff --git a/src/util/heap/layout/map32.rs b/src/util/heap/layout/map32.rs index e3eae2b8f6..6f0fac3e58 100644 --- a/src/util/heap/layout/map32.rs +++ b/src/util/heap/layout/map32.rs @@ -194,7 +194,8 @@ impl VMMap for Map32 { /* establish bounds of discontiguous space */ let start_address = from; let first_chunk = start_address.chunk_index(); - let last_chunk = to.chunk_index(); + let last_byte = to - 1; + let last_chunk = last_byte.chunk_index(); let unavail_start_chunk = last_chunk + 1; let trailing_chunks = vm_layout().max_chunks() - unavail_start_chunk; let pages = (1 + last_chunk - first_chunk) * PAGES_IN_CHUNK; From 0c4df754ab3d2fb360dd66e1fa2ecbb38bc7156e Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Mon, 30 Oct 2023 15:10:05 +0800 Subject: [PATCH 10/15] Remove VMRequest completely. --- src/plan/nogc/global.rs | 2 -- src/util/heap/mod.rs | 2 -- src/util/heap/vmrequest.rs | 69 -------------------------------------- 3 files changed, 73 deletions(-) delete mode 100644 src/util/heap/vmrequest.rs diff --git a/src/plan/nogc/global.rs b/src/plan/nogc/global.rs index e59f0a872c..b6a6312413 100644 --- a/src/plan/nogc/global.rs +++ b/src/plan/nogc/global.rs @@ -10,8 +10,6 @@ use crate::policy::space::Space; use crate::scheduler::GCWorkScheduler; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::heap::heap_meta::SpaceSpec; -#[allow(unused_imports)] -use crate::util::heap::VMRequest; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::opaque_pointer::*; use crate::vm::VMBinding; diff --git a/src/util/heap/mod.rs b/src/util/heap/mod.rs index f3cf1ff12b..34d18595fe 100644 --- a/src/util/heap/mod.rs +++ b/src/util/heap/mod.rs @@ -10,7 +10,6 @@ pub(crate) mod heap_meta; pub(crate) mod monotonepageresource; pub(crate) mod pageresource; pub(crate) mod space_descriptor; -mod vmrequest; pub(crate) use self::accounting::PageAccounting; pub(crate) use 
self::blockpageresource::BlockPageResource; @@ -18,4 +17,3 @@ pub(crate) use self::freelistpageresource::FreeListPageResource; pub use self::layout::vm_layout; pub(crate) use self::monotonepageresource::MonotonePageResource; pub(crate) use self::pageresource::PageResource; -pub(crate) use self::vmrequest::VMRequest; diff --git a/src/util/heap/vmrequest.rs b/src/util/heap/vmrequest.rs deleted file mode 100644 index 7a6f8148f8..0000000000 --- a/src/util/heap/vmrequest.rs +++ /dev/null @@ -1,69 +0,0 @@ -use super::layout::vm_layout::*; -use crate::util::constants::*; -use crate::util::Address; - -#[derive(Clone, Copy, Debug)] -pub enum VMRequest { - Discontiguous, - Fixed { start: Address, extent: usize }, - Extent { extent: usize, top: bool }, - Fraction { frac: f32, top: bool }, -} - -impl VMRequest { - pub fn is_discontiguous(&self) -> bool { - matches!(self, VMRequest::Discontiguous { .. }) - } - - pub fn common64bit(top: bool) -> Self { - VMRequest::Extent { - extent: vm_layout().max_space_extent(), - top, - } - } - - pub fn discontiguous() -> Self { - if cfg!(target_pointer_width = "64") && vm_layout().force_use_contiguous_spaces { - return Self::common64bit(false); - } - VMRequest::Discontiguous - } - - pub fn fixed_size(mb: usize) -> Self { - if cfg!(target_pointer_width = "64") && vm_layout().force_use_contiguous_spaces { - return Self::common64bit(false); - } - VMRequest::Extent { - extent: mb << LOG_BYTES_IN_MBYTE, - top: false, - } - } - - pub fn fraction(frac: f32) -> Self { - if cfg!(target_pointer_width = "64") && vm_layout().force_use_contiguous_spaces { - return Self::common64bit(false); - } - VMRequest::Fraction { frac, top: false } - } - - pub fn high_fixed_size(mb: usize) -> Self { - if cfg!(target_pointer_width = "64") && vm_layout().force_use_contiguous_spaces { - return Self::common64bit(true); - } - VMRequest::Extent { - extent: mb << LOG_BYTES_IN_MBYTE, - top: true, - } - } - - pub fn fixed_extent(extent: usize, top: bool) -> Self { - if 
cfg!(target_pointer_width = "64") && vm_layout().force_use_contiguous_spaces { - return Self::common64bit(top); - } - VMRequest::Extent { extent, top } - } - - pub fn fixed(start: Address, extent: usize) -> Self { - VMRequest::Fixed { start, extent } - } -} From f019f8ab521dcc36c9b29d2437e7da3bcafbf54a Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Mon, 30 Oct 2023 15:11:58 +0800 Subject: [PATCH 11/15] Formatting and clippy --- src/plan/generational/copying/global.rs | 12 +++++++++--- src/plan/generational/global.rs | 6 +----- src/plan/generational/immix/global.rs | 7 +++++-- src/plan/immix/global.rs | 5 ++++- src/plan/markcompact/global.rs | 5 ++++- src/plan/marksweep/global.rs | 11 +++++------ src/plan/nogc/global.rs | 15 ++++++++++++--- src/plan/pageprotect/global.rs | 5 ++++- src/plan/semispace/global.rs | 10 ++++++++-- src/policy/immortalspace.rs | 2 +- src/policy/space.rs | 6 ++++-- src/util/heap/layout/map32.rs | 3 +-- 12 files changed, 58 insertions(+), 29 deletions(-) diff --git a/src/plan/generational/copying/global.rs b/src/plan/generational/copying/global.rs index d250fc39fd..aecae2433d 100644 --- a/src/plan/generational/copying/global.rs +++ b/src/plan/generational/copying/global.rs @@ -16,10 +16,10 @@ use crate::policy::space::Space; use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::*; +use crate::util::heap::heap_meta::SpaceSpec; use crate::util::Address; use crate::util::ObjectReference; use crate::util::VMWorkerThread; -use crate::util::heap::heap_meta::SpaceSpec; use crate::vm::*; use crate::ObjectQueue; use enum_map::EnumMap; @@ -200,8 +200,14 @@ impl GenCopy { crate::plan::generational::new_generational_global_metadata_specs::(), }; - let copyspace0_spec = plan_args.global_args.heap.specify_space(SpaceSpec::DontCare); - let copyspace1_spec = plan_args.global_args.heap.specify_space(SpaceSpec::DontCare); + let copyspace0_spec = plan_args + .global_args + .heap + 
.specify_space(SpaceSpec::DontCare); + let copyspace1_spec = plan_args + .global_args + .heap + .specify_space(SpaceSpec::DontCare); // Spaces will eventually be placed by `BasePlan`. let gen = CommonGenPlan::new(&mut plan_args); diff --git a/src/plan/generational/global.rs b/src/plan/generational/global.rs index 2de3d55280..79fe582e32 100644 --- a/src/plan/generational/global.rs +++ b/src/plan/generational/global.rs @@ -47,11 +47,7 @@ impl CommonGenPlan { let common = CommonPlan::new(args); let nursery = CopySpace::new( - args.get_space_args( - "nursery", - true, - nursery_spec.unwrap(), - ), + args.get_space_args("nursery", true, nursery_spec.unwrap()), true, ); diff --git a/src/plan/generational/immix/global.rs b/src/plan/generational/immix/global.rs index 24fd7f8e36..b973deabd8 100644 --- a/src/plan/generational/immix/global.rs +++ b/src/plan/generational/immix/global.rs @@ -17,10 +17,10 @@ use crate::scheduler::GCWorkScheduler; use crate::scheduler::GCWorker; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::*; +use crate::util::heap::heap_meta::SpaceSpec; use crate::util::Address; use crate::util::ObjectReference; use crate::util::VMWorkerThread; -use crate::util::heap::heap_meta::SpaceSpec; use crate::vm::*; use crate::ObjectQueue; @@ -232,7 +232,10 @@ impl GenImmix { crate::plan::generational::new_generational_global_metadata_specs::(), }; - let immix_space_spec = plan_args.global_args.heap.specify_space(SpaceSpec::DontCare); + let immix_space_spec = plan_args + .global_args + .heap + .specify_space(SpaceSpec::DontCare); // Spaces will eventually be placed by `BasePlan`. 
let gen = CommonGenPlan::new(&mut plan_args); diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs index e0ed4ba467..0ae70842ae 100644 --- a/src/plan/immix/global.rs +++ b/src/plan/immix/global.rs @@ -140,7 +140,10 @@ impl Immix { mut plan_args: CreateSpecificPlanArgs, space_args: ImmixSpaceArgs, ) -> Self { - let immix_space_spec = plan_args.global_args.heap.specify_space(SpaceSpec::DontCare); + let immix_space_spec = plan_args + .global_args + .heap + .specify_space(SpaceSpec::DontCare); // Spaces will eventually be placed by `BasePlan`. let common = CommonPlan::new(&mut plan_args); diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index 390c1d0ad0..574e54e318 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -192,7 +192,10 @@ impl MarkCompact { global_side_metadata_specs, }; - let mc_space_spec = plan_args.global_args.heap.specify_space(SpaceSpec::DontCare); + let mc_space_spec = plan_args + .global_args + .heap + .specify_space(SpaceSpec::DontCare); // Spaces will eventually be placed by `BasePlan`. let common = CommonPlan::new(&mut plan_args); diff --git a/src/plan/marksweep/global.rs b/src/plan/marksweep/global.rs index 31781db073..2d6d9d0931 100644 --- a/src/plan/marksweep/global.rs +++ b/src/plan/marksweep/global.rs @@ -102,17 +102,16 @@ impl MarkSweep { global_side_metadata_specs, }; - let ms_spec = plan_args.global_args.heap.specify_space(SpaceSpec::DontCare); + let ms_spec = plan_args + .global_args + .heap + .specify_space(SpaceSpec::DontCare); // Spaces will eventually be placed by `BasePlan`. 
let common = CommonPlan::new(&mut plan_args); let res = MarkSweep { - ms: MarkSweepSpace::new(plan_args.get_space_args( - "ms", - true, - ms_spec.unwrap(), - )), + ms: MarkSweepSpace::new(plan_args.get_space_args("ms", true, ms_spec.unwrap())), common, }; diff --git a/src/plan/nogc/global.rs b/src/plan/nogc/global.rs index b6a6312413..4da5fe9e63 100644 --- a/src/plan/nogc/global.rs +++ b/src/plan/nogc/global.rs @@ -87,9 +87,18 @@ impl NoGC { global_side_metadata_specs: SideMetadataContext::new_global_specs(&[]), }; - let nogc_space_spec = plan_args.global_args.heap.specify_space(SpaceSpec::DontCare); - let immortal_spec = plan_args.global_args.heap.specify_space(SpaceSpec::DontCare); - let los = plan_args.global_args.heap.specify_space(SpaceSpec::DontCare); + let nogc_space_spec = plan_args + .global_args + .heap + .specify_space(SpaceSpec::DontCare); + let immortal_spec = plan_args + .global_args + .heap + .specify_space(SpaceSpec::DontCare); + let los = plan_args + .global_args + .heap + .specify_space(SpaceSpec::DontCare); // Spaces will eventually be placed by `BasePlan`. let base = BasePlan::new(&mut plan_args); diff --git a/src/plan/pageprotect/global.rs b/src/plan/pageprotect/global.rs index b919a0f23e..28c97e02bb 100644 --- a/src/plan/pageprotect/global.rs +++ b/src/plan/pageprotect/global.rs @@ -97,7 +97,10 @@ impl PageProtect { global_side_metadata_specs: SideMetadataContext::new_global_specs(&[]), }; - let space_spec = plan_args.global_args.heap.specify_space(SpaceSpec::DontCare); + let space_spec = plan_args + .global_args + .heap + .specify_space(SpaceSpec::DontCare); // Spaces will eventually be placed by `BasePlan`. 
let common = CommonPlan::new(&mut plan_args); diff --git a/src/plan/semispace/global.rs b/src/plan/semispace/global.rs index 37584d9062..954779220b 100644 --- a/src/plan/semispace/global.rs +++ b/src/plan/semispace/global.rs @@ -137,8 +137,14 @@ impl SemiSpace { global_side_metadata_specs: SideMetadataContext::new_global_specs(&[]), }; - let copyspace0_spec = plan_args.global_args.heap.specify_space(SpaceSpec::DontCare); - let copyspace1_spec = plan_args.global_args.heap.specify_space(SpaceSpec::DontCare); + let copyspace0_spec = plan_args + .global_args + .heap + .specify_space(SpaceSpec::DontCare); + let copyspace1_spec = plan_args + .global_args + .heap + .specify_space(SpaceSpec::DontCare); // Spaces will eventually be placed by `BasePlan`. let common = CommonPlan::new(&mut plan_args); diff --git a/src/policy/immortalspace.rs b/src/policy/immortalspace.rs index 7fa8e127c8..1894ebbd9b 100644 --- a/src/policy/immortalspace.rs +++ b/src/policy/immortalspace.rs @@ -145,7 +145,7 @@ impl ImmortalSpace { start: Address, size: usize, ) -> Self { - assert!(!!args.space_meta.contiguous); + assert!(args.space_meta.contiguous); ImmortalSpace { mark_state: MarkState::new(), pr: MonotonePageResource::new_contiguous(start, size, args.vm_map), diff --git a/src/policy/space.rs b/src/policy/space.rs index 9586300c91..521caae9b9 100644 --- a/src/policy/space.rs +++ b/src/policy/space.rs @@ -523,8 +523,10 @@ impl CommonSpace { // insert into our vm map if the range overlaps with our heap. 
{ use crate::util::heap::layout; - let overlap = - Address::range_intersection(&(start..start + extent), &layout::available_range()); + let overlap = Address::range_intersection( + &(start..start + extent), + &layout::available_range(), + ); if !overlap.is_empty() { args.plan_args.vm_map.insert( overlap.start, diff --git a/src/util/heap/layout/map32.rs b/src/util/heap/layout/map32.rs index 6f0fac3e58..9f7fc4f7fb 100644 --- a/src/util/heap/layout/map32.rs +++ b/src/util/heap/layout/map32.rs @@ -104,8 +104,7 @@ impl VMMap for Map32 { Box::new(IntArrayFreeList::new(units, grain, 1)) } - unsafe fn bind_freelist(&self, _pr: *const CommonFreeListPageResource) { - } + unsafe fn bind_freelist(&self, _pr: *const CommonFreeListPageResource) {} unsafe fn allocate_contiguous_chunks( &self, From 80170c3a4561752d1b5a5b728e3716dba0a449ee Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Mon, 30 Oct 2023 16:53:29 +0800 Subject: [PATCH 12/15] Fix typo --- src/plan/global.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/plan/global.rs b/src/plan/global.rs index 9bede9d9a0..6dc570104f 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -408,13 +408,13 @@ impl BasePlan { code_lo_space: ImmortalSpace::new(args.get_space_args( "code_lo_space", true, - code_space_spec.unwrap(), + code_lo_space_spec.unwrap(), )), #[cfg(feature = "ro_space")] ro_space: ImmortalSpace::new(args.get_space_args( "ro_space", true, - code_space_spec.unwrap(), + ro_space_spec.unwrap(), )), #[cfg(feature = "vm_space")] vm_space: VMSpace::new(args.get_space_args( From 09efc8450f564f2764f78f3b6421cf530b80705a Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Mon, 30 Oct 2023 19:27:22 +0800 Subject: [PATCH 13/15] Fix tutorial --- .../tutorial/code/mygc_semispace/global.rs | 36 ++++++++++++--- docs/userguide/src/tutorial/mygc/ss/alloc.md | 44 ++++++++++++++----- 2 files changed, 63 insertions(+), 17 deletions(-) diff --git a/docs/userguide/src/tutorial/code/mygc_semispace/global.rs 
b/docs/userguide/src/tutorial/code/mygc_semispace/global.rs index a7e6fe7485..567f1bbd80 100644 --- a/docs/userguide/src/tutorial/code/mygc_semispace/global.rs +++ b/docs/userguide/src/tutorial/code/mygc_semispace/global.rs @@ -2,8 +2,8 @@ use crate::plan::global::BasePlan; //Modify use crate::plan::global::CommonPlan; // Add use crate::plan::global::{CreateGeneralPlanArgs, CreateSpecificPlanArgs}; -use crate::plan::mygc::mutator::ALLOCATOR_MAPPING; use crate::plan::mygc::gc_work::MyGCWorkContext; +use crate::plan::mygc::mutator::ALLOCATOR_MAPPING; use crate::plan::AllocationSemantics; use crate::plan::Plan; use crate::plan::PlanConstraints; @@ -12,7 +12,7 @@ use crate::policy::space::Space; use crate::scheduler::*; // Modify use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::*; -use crate::util::heap::VMRequest; +use crate::util::heap::heap_meta::SpaceSpec; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::opaque_pointer::*; use crate::vm::VMBinding; @@ -167,13 +167,35 @@ impl MyGC { global_side_metadata_specs: SideMetadataContext::new_global_specs(&[]), }; + // ANCHOR: specify_spaces + let copyspace0_meta = plan_args + .global_args + .heap + .specify_space(SpaceSpec::DontCare); + let copyspace1_meta = plan_args + .global_args + .heap + .specify_space(SpaceSpec::DontCare); + // ANCHOR_END: specify_spaces + + // ANCHOR: create_common_plan + // Spaces will eventually be placed by `BasePlan`. 
+ let common = CommonPlan::new(&mut plan_args); + // ANCHOR_END: create_common_plan + let res = MyGC { hi: AtomicBool::new(false), - // ANCHOR: copyspace_new - copyspace0: CopySpace::new(plan_args.get_space_args("copyspace0", true, VMRequest::discontiguous()), false), - // ANCHOR_END: copyspace_new - copyspace1: CopySpace::new(plan_args.get_space_args("copyspace1", true, VMRequest::discontiguous()), true), - common: CommonPlan::new(plan_args), + // ANCHOR: copyspaces_new + copyspace0: CopySpace::new( + plan_args.get_space_args("copyspace0", true, copyspace0_meta.unwrap()), + false, + ), + copyspace1: CopySpace::new( + plan_args.get_space_args("copyspace1", true, copyspace1_meta.unwrap()), + true, + ), + // ANCHOR_END: copyspaces_new + common, }; res.verify_side_metadata_sanity(); diff --git a/docs/userguide/src/tutorial/mygc/ss/alloc.md b/docs/userguide/src/tutorial/mygc/ss/alloc.md index 1da743a41a..a1f568492b 100644 --- a/docs/userguide/src/tutorial/mygc/ss/alloc.md +++ b/docs/userguide/src/tutorial/mygc/ss/alloc.md @@ -82,24 +82,48 @@ use mmtk_macros::{HasSpaces, PlanTraceObject}; #### Constructor -Change `fn new()`. This section initialises and prepares the objects in MyGC +Change `fn new()`. This section initialises and prepares the objects in MyGC that you just defined. - 1. Delete the definition of `mygc_space`. - Instead, we will define the two copyspaces here. - 2. Define one of the copyspaces by adding the following code: ```rust -{{#include ../../code/mygc_semispace/global.rs:copyspace_new}} +{{#include ../../code/mygc_semispace/global.rs:plan_new}} ``` - 3. Create another copyspace, called `copyspace1`, defining it as a fromspace - instead of a tospace. (Hint: the definitions for - copyspaces are in `src/policy/copyspace.rs`.) - 4. Finally, replace the old MyGC initializer. +We now look into the steps. + +First, we use the `HeapMeta` object provided from the argument to specify all +spaces that will be created in the current plan. 
+ ```rust -{{#include ../../code/mygc_semispace/global.rs:plan_new}} +{{#include ../../code/mygc_semispace/global.rs:specify_spaces}} ``` +We do not have special requirements for either of the copy-spaces, so we just +specify `SpaceSpec::DontCare` here. At this step, the return values +`copyspace0_meta` and `copyspace1_meta` has not become usable, yet. + +Then, we construct the parent structure `CommonPlan::new()`. + +```rust +{{#include ../../code/mygc_semispace/global.rs:create_common_plan}} +``` + +`CommonPlan::new()` will call `BasePlan::new()` which, in turn, will call +`HeapMeta::place_spaces()`. That will determine the address range of all +spaces we specified. + +After this, we can call `copyspace0_meta.unwrap()` to retrieve the compute +metadata for creating `copyspace0`, and `copyspace1` is similar. We can now +create the two `CopySpace` instances. + +```rust +{{#include ../../code/mygc_semispace/global.rs:copyspaces_new}} +``` + +Note that `CommonSpace` and `BaseSpace` define other spaces, such as the large +object space. Their constructors specify their spaces before determining their +address ranges and instantiating them, just like we discribed here. 
+ ### Access MyGC spaces Add a new section of methods for MyGC: From 3d78a80fc60e4ecc9e7348025fc108719004d017 Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Mon, 30 Oct 2023 19:32:24 +0800 Subject: [PATCH 14/15] Change variable names from _spec to _meta --- src/plan/generational/global.rs | 4 ++-- src/plan/global.rs | 24 ++++++++++++------------ src/plan/immix/global.rs | 4 ++-- src/plan/markcompact/global.rs | 4 ++-- src/plan/marksweep/global.rs | 4 ++-- src/plan/nogc/global.rs | 8 ++++---- src/plan/pageprotect/global.rs | 4 ++-- src/plan/semispace/global.rs | 8 ++++---- 8 files changed, 30 insertions(+), 30 deletions(-) diff --git a/src/plan/generational/global.rs b/src/plan/generational/global.rs index 79fe582e32..08cd97ea41 100644 --- a/src/plan/generational/global.rs +++ b/src/plan/generational/global.rs @@ -38,7 +38,7 @@ pub struct CommonGenPlan { impl CommonGenPlan { pub fn new(args: &mut CreateSpecificPlanArgs) -> Self { - let nursery_spec = args.global_args.heap.specify_space(SpaceSpec::Extent { + let nursery_meta = args.global_args.heap.specify_space(SpaceSpec::Extent { extent: args.global_args.options.get_max_nursery_bytes(), top: false, }); @@ -47,7 +47,7 @@ impl CommonGenPlan { let common = CommonPlan::new(args); let nursery = CopySpace::new( - args.get_space_args("nursery", true, nursery_spec.unwrap()), + args.get_space_args("nursery", true, nursery_meta.unwrap()), true, ); diff --git a/src/plan/global.rs b/src/plan/global.rs index 6dc570104f..21c74c3b5e 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -387,11 +387,11 @@ impl BasePlan { #[allow(unused_mut)] // 'args' only needs to be mutable for certain features pub fn new(args: &mut CreateSpecificPlanArgs) -> BasePlan { #[cfg(feature = "code_space")] - let code_space_spec = args.global_args.heap.specify_space(SpaceSpec::DontCare); + let code_space_meta = args.global_args.heap.specify_space(SpaceSpec::DontCare); #[cfg(feature = "code_space")] - let code_lo_space_spec = 
args.global_args.heap.specify_space(SpaceSpec::DontCare); + let code_lo_space_meta = args.global_args.heap.specify_space(SpaceSpec::DontCare); #[cfg(feature = "ro_space")] - let ro_space_spec = args.global_args.heap.specify_space(SpaceSpec::DontCare); + let ro_space_meta = args.global_args.heap.specify_space(SpaceSpec::DontCare); // NOTE: We don't specify VM space because it doesn't use SpaceMeta anyway. // BasePlan does not have any nested structs with spaces. We now place spaces. @@ -402,19 +402,19 @@ impl BasePlan { code_space: ImmortalSpace::new(args.get_space_args( "code_space", true, - code_space_spec.unwrap(), + code_space_meta.unwrap(), )), #[cfg(feature = "code_space")] code_lo_space: ImmortalSpace::new(args.get_space_args( "code_lo_space", true, - code_lo_space_spec.unwrap(), + code_lo_space_meta.unwrap(), )), #[cfg(feature = "ro_space")] ro_space: ImmortalSpace::new(args.get_space_args( "ro_space", true, - ro_space_spec.unwrap(), + ro_space_meta.unwrap(), )), #[cfg(feature = "vm_space")] vm_space: VMSpace::new(args.get_space_args( @@ -560,9 +560,9 @@ pub struct CommonPlan { impl CommonPlan { pub fn new(args: &mut CreateSpecificPlanArgs) -> CommonPlan { - let immortal_spec = args.global_args.heap.specify_space(SpaceSpec::DontCare); - let los_spec = args.global_args.heap.specify_space(SpaceSpec::DontCare); - let nonmoving_spec = args.global_args.heap.specify_space(SpaceSpec::DontCare); + let immortal_meta = args.global_args.heap.specify_space(SpaceSpec::DontCare); + let los_meta = args.global_args.heap.specify_space(SpaceSpec::DontCare); + let nonmoving_meta = args.global_args.heap.specify_space(SpaceSpec::DontCare); let base = BasePlan::new(args); @@ -570,13 +570,13 @@ impl CommonPlan { immortal: ImmortalSpace::new(args.get_space_args( "immortal", true, - immortal_spec.unwrap(), + immortal_meta.unwrap(), )), - los: LargeObjectSpace::new(args.get_space_args("los", true, los_spec.unwrap()), false), + los: LargeObjectSpace::new(args.get_space_args("los", 
true, los_meta.unwrap()), false), nonmoving: ImmortalSpace::new(args.get_space_args( "nonmoving", true, - nonmoving_spec.unwrap(), + nonmoving_meta.unwrap(), )), base, } diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs index 0ae70842ae..0e70f0824d 100644 --- a/src/plan/immix/global.rs +++ b/src/plan/immix/global.rs @@ -140,7 +140,7 @@ impl Immix { mut plan_args: CreateSpecificPlanArgs, space_args: ImmixSpaceArgs, ) -> Self { - let immix_space_spec = plan_args + let immix_space_meta = plan_args .global_args .heap .specify_space(SpaceSpec::DontCare); @@ -150,7 +150,7 @@ impl Immix { let immix = Immix { immix_space: ImmixSpace::new( - plan_args.get_space_args("immix", true, immix_space_spec.unwrap()), + plan_args.get_space_args("immix", true, immix_space_meta.unwrap()), space_args, ), common, diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index 574e54e318..ed1ef307ce 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -192,7 +192,7 @@ impl MarkCompact { global_side_metadata_specs, }; - let mc_space_spec = plan_args + let mc_space_meta = plan_args .global_args .heap .specify_space(SpaceSpec::DontCare); @@ -201,7 +201,7 @@ impl MarkCompact { let common = CommonPlan::new(&mut plan_args); let mc_space = - MarkCompactSpace::new(plan_args.get_space_args("mc", true, mc_space_spec.unwrap())); + MarkCompactSpace::new(plan_args.get_space_args("mc", true, mc_space_meta.unwrap())); let res = MarkCompact { mc_space, common }; diff --git a/src/plan/marksweep/global.rs b/src/plan/marksweep/global.rs index 2d6d9d0931..79117615e4 100644 --- a/src/plan/marksweep/global.rs +++ b/src/plan/marksweep/global.rs @@ -102,7 +102,7 @@ impl MarkSweep { global_side_metadata_specs, }; - let ms_spec = plan_args + let ms_meta = plan_args .global_args .heap .specify_space(SpaceSpec::DontCare); @@ -111,7 +111,7 @@ impl MarkSweep { let common = CommonPlan::new(&mut plan_args); let res = MarkSweep { - ms: 
MarkSweepSpace::new(plan_args.get_space_args("ms", true, ms_spec.unwrap())), + ms: MarkSweepSpace::new(plan_args.get_space_args("ms", true, ms_meta.unwrap())), common, }; diff --git a/src/plan/nogc/global.rs b/src/plan/nogc/global.rs index 4da5fe9e63..c178bdee1a 100644 --- a/src/plan/nogc/global.rs +++ b/src/plan/nogc/global.rs @@ -87,11 +87,11 @@ impl NoGC { global_side_metadata_specs: SideMetadataContext::new_global_specs(&[]), }; - let nogc_space_spec = plan_args + let nogc_space_meta = plan_args .global_args .heap .specify_space(SpaceSpec::DontCare); - let immortal_spec = plan_args + let immortal_meta = plan_args .global_args .heap .specify_space(SpaceSpec::DontCare); @@ -107,12 +107,12 @@ impl NoGC { nogc_space: NoGCImmortalSpace::new(plan_args.get_space_args( "nogc_space", cfg!(not(feature = "nogc_no_zeroing")), - nogc_space_spec.unwrap(), + nogc_space_meta.unwrap(), )), immortal: ImmortalSpace::new(plan_args.get_space_args( "immortal", true, - immortal_spec.unwrap(), + immortal_meta.unwrap(), )), los: ImmortalSpace::new(plan_args.get_space_args("los", true, los.unwrap())), base, diff --git a/src/plan/pageprotect/global.rs b/src/plan/pageprotect/global.rs index 28c97e02bb..540a90fa3f 100644 --- a/src/plan/pageprotect/global.rs +++ b/src/plan/pageprotect/global.rs @@ -97,7 +97,7 @@ impl PageProtect { global_side_metadata_specs: SideMetadataContext::new_global_specs(&[]), }; - let space_spec = plan_args + let space_meta = plan_args .global_args .heap .specify_space(SpaceSpec::DontCare); @@ -107,7 +107,7 @@ impl PageProtect { let ret = PageProtect { space: LargeObjectSpace::new( - plan_args.get_space_args("pageprotect", true, space_spec.unwrap()), + plan_args.get_space_args("pageprotect", true, space_meta.unwrap()), true, ), common, diff --git a/src/plan/semispace/global.rs b/src/plan/semispace/global.rs index 954779220b..f0122df0d0 100644 --- a/src/plan/semispace/global.rs +++ b/src/plan/semispace/global.rs @@ -137,11 +137,11 @@ impl SemiSpace { 
global_side_metadata_specs: SideMetadataContext::new_global_specs(&[]), }; - let copyspace0_spec = plan_args + let copyspace0_meta = plan_args .global_args .heap .specify_space(SpaceSpec::DontCare); - let copyspace1_spec = plan_args + let copyspace1_meta = plan_args .global_args .heap .specify_space(SpaceSpec::DontCare); @@ -152,11 +152,11 @@ impl SemiSpace { let res = SemiSpace { hi: AtomicBool::new(false), copyspace0: CopySpace::new( - plan_args.get_space_args("copyspace0", true, copyspace0_spec.unwrap()), + plan_args.get_space_args("copyspace0", true, copyspace0_meta.unwrap()), false, ), copyspace1: CopySpace::new( - plan_args.get_space_args("copyspace1", true, copyspace1_spec.unwrap()), + plan_args.get_space_args("copyspace1", true, copyspace1_meta.unwrap()), true, ), common, From 2ff2116f435d4511355c834e5ffdc651aa0de1fa Mon Sep 17 00:00:00 2001 From: Kunshan Wang Date: Mon, 30 Oct 2023 20:45:21 +0800 Subject: [PATCH 15/15] Rename types and variables --- .../tutorial/code/mygc_semispace/global.rs | 18 +-- docs/userguide/src/tutorial/mygc/ss/alloc.md | 17 +-- src/plan/generational/copying/global.rs | 14 +- src/plan/generational/global.rs | 6 +- src/plan/generational/immix/global.rs | 8 +- src/plan/global.rs | 32 ++--- src/plan/immix/global.rs | 8 +- src/plan/markcompact/global.rs | 8 +- src/plan/marksweep/global.rs | 8 +- src/plan/nogc/global.rs | 20 +-- src/plan/pageprotect/global.rs | 8 +- src/plan/semispace/global.rs | 14 +- src/policy/space.rs | 10 +- src/util/heap/heap_meta.rs | 132 ++++++++++-------- 14 files changed, 160 insertions(+), 143 deletions(-) diff --git a/docs/userguide/src/tutorial/code/mygc_semispace/global.rs b/docs/userguide/src/tutorial/code/mygc_semispace/global.rs index 567f1bbd80..7ebcf1b37b 100644 --- a/docs/userguide/src/tutorial/code/mygc_semispace/global.rs +++ b/docs/userguide/src/tutorial/code/mygc_semispace/global.rs @@ -12,13 +12,13 @@ use crate::policy::space::Space; use crate::scheduler::*; // Modify use 
crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::*; -use crate::util::heap::heap_meta::SpaceSpec; +use crate::util::heap::heap_meta::VMRequest; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::opaque_pointer::*; use crate::vm::VMBinding; use enum_map::EnumMap; use std::sync::atomic::{AtomicBool, Ordering}; // Add -// ANCHOR_END: imports_no_gc_work + // ANCHOR_END: imports_no_gc_work // Remove #[allow(unused_imports)]. // Remove handle_user_collection_request(). @@ -66,7 +66,7 @@ impl Plan for MyGC { }, space_mapping: vec![ // The tospace argument doesn't matter, we will rebind before a GC anyway. - (CopySelector::CopySpace(0), &self.copyspace0) + (CopySelector::CopySpace(0), &self.copyspace0), ], constraints: &MYGC_CONSTRAINTS, } @@ -168,14 +168,14 @@ impl MyGC { }; // ANCHOR: specify_spaces - let copyspace0_meta = plan_args + let copyspace0_resp = plan_args .global_args .heap - .specify_space(SpaceSpec::DontCare); - let copyspace1_meta = plan_args + .specify_space(VMRequest::Unrestricted); + let copyspace1_resp = plan_args .global_args .heap - .specify_space(SpaceSpec::DontCare); + .specify_space(VMRequest::Unrestricted); // ANCHOR_END: specify_spaces // ANCHOR: create_common_plan @@ -187,11 +187,11 @@ impl MyGC { hi: AtomicBool::new(false), // ANCHOR: copyspaces_new copyspace0: CopySpace::new( - plan_args.get_space_args("copyspace0", true, copyspace0_meta.unwrap()), + plan_args.get_space_args("copyspace0", true, copyspace0_resp.unwrap()), false, ), copyspace1: CopySpace::new( - plan_args.get_space_args("copyspace1", true, copyspace1_meta.unwrap()), + plan_args.get_space_args("copyspace1", true, copyspace1_resp.unwrap()), true, ), // ANCHOR_END: copyspaces_new diff --git a/docs/userguide/src/tutorial/mygc/ss/alloc.md b/docs/userguide/src/tutorial/mygc/ss/alloc.md index a1f568492b..c27635854b 100644 --- a/docs/userguide/src/tutorial/mygc/ss/alloc.md +++ b/docs/userguide/src/tutorial/mygc/ss/alloc.md @@ -99,8 
+99,8 @@ spaces that will be created in the current plan.
 ```
 
 We do not have special requirements for either of the copy-spaces, so we just
-specify `SpaceSpec::DontCare` here. At this step, the return values
-`copyspace0_meta` and `copyspace1_meta` has not become usable, yet.
+specify `VMRequest::Unrestricted` here. At this step, the return values
+`copyspace0_resp` and `copyspace1_resp` have not become usable yet.
 
 Then, we construct the parent structure `CommonPlan::new()`.
 
@@ -112,17 +112,18 @@ Then, we construct the parent structure `CommonPlan::new()`.
 `HeapMeta::place_spaces()`. That will determine the address range of all
 spaces we specified.
 
-After this, we can call `copyspace0_meta.unwrap()` to retrieve the compute
-metadata for creating `copyspace0`, and `copyspace1` is similar. We can now
-create the two `CopySpace` instances.
+After this, we can call `copyspace0_resp.unwrap()` to retrieve the computed
+placement information for creating `copyspace0`. `copyspace1` is similar. We
+can now create the two `CopySpace` instances.
 
 ```rust
 {{#include ../../code/mygc_semispace/global.rs:copyspaces_new}}
 ```
 
-Note that `CommonSpace` and `BaseSpace` define other spaces, such as the large
-object space. Their constructors specify their spaces before determining their
-address ranges and instantiating them, just like we discribed here.
+Note that `CommonSpace` and `BaseSpace` also define other spaces, such as the
+large object space. Their constructors specify their spaces before
+determining their address ranges and instantiating them, just like we
+described here.
### Access MyGC spaces diff --git a/src/plan/generational/copying/global.rs b/src/plan/generational/copying/global.rs index aecae2433d..84f16a3d70 100644 --- a/src/plan/generational/copying/global.rs +++ b/src/plan/generational/copying/global.rs @@ -16,7 +16,7 @@ use crate::policy::space::Space; use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::*; -use crate::util::heap::heap_meta::SpaceSpec; +use crate::util::heap::heap_meta::VMRequest; use crate::util::Address; use crate::util::ObjectReference; use crate::util::VMWorkerThread; @@ -200,24 +200,24 @@ impl GenCopy { crate::plan::generational::new_generational_global_metadata_specs::(), }; - let copyspace0_spec = plan_args + let copyspace0_resp = plan_args .global_args .heap - .specify_space(SpaceSpec::DontCare); - let copyspace1_spec = plan_args + .specify_space(VMRequest::Unrestricted); + let copyspace1_resp = plan_args .global_args .heap - .specify_space(SpaceSpec::DontCare); + .specify_space(VMRequest::Unrestricted); // Spaces will eventually be placed by `BasePlan`. 
let gen = CommonGenPlan::new(&mut plan_args); let copyspace0 = CopySpace::new( - plan_args.get_space_args("copyspace0", true, copyspace0_spec.unwrap()), + plan_args.get_space_args("copyspace0", true, copyspace0_resp.unwrap()), false, ); let copyspace1 = CopySpace::new( - plan_args.get_space_args("copyspace1", true, copyspace1_spec.unwrap()), + plan_args.get_space_args("copyspace1", true, copyspace1_resp.unwrap()), true, ); diff --git a/src/plan/generational/global.rs b/src/plan/generational/global.rs index 08cd97ea41..f827a7f24b 100644 --- a/src/plan/generational/global.rs +++ b/src/plan/generational/global.rs @@ -6,7 +6,7 @@ use crate::policy::copyspace::CopySpace; use crate::policy::space::Space; use crate::scheduler::*; use crate::util::copy::CopySemantics; -use crate::util::heap::heap_meta::SpaceSpec; +use crate::util::heap::heap_meta::VMRequest; use crate::util::statistics::counter::EventCounter; use crate::util::Address; use crate::util::ObjectReference; @@ -38,7 +38,7 @@ pub struct CommonGenPlan { impl CommonGenPlan { pub fn new(args: &mut CreateSpecificPlanArgs) -> Self { - let nursery_meta = args.global_args.heap.specify_space(SpaceSpec::Extent { + let nursery_resp = args.global_args.heap.specify_space(VMRequest::Extent { extent: args.global_args.options.get_max_nursery_bytes(), top: false, }); @@ -47,7 +47,7 @@ impl CommonGenPlan { let common = CommonPlan::new(args); let nursery = CopySpace::new( - args.get_space_args("nursery", true, nursery_meta.unwrap()), + args.get_space_args("nursery", true, nursery_resp.unwrap()), true, ); diff --git a/src/plan/generational/immix/global.rs b/src/plan/generational/immix/global.rs index b973deabd8..4a7831a7da 100644 --- a/src/plan/generational/immix/global.rs +++ b/src/plan/generational/immix/global.rs @@ -17,7 +17,7 @@ use crate::scheduler::GCWorkScheduler; use crate::scheduler::GCWorker; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::*; -use crate::util::heap::heap_meta::SpaceSpec; 
+use crate::util::heap::heap_meta::VMRequest; use crate::util::Address; use crate::util::ObjectReference; use crate::util::VMWorkerThread; @@ -232,16 +232,16 @@ impl GenImmix { crate::plan::generational::new_generational_global_metadata_specs::(), }; - let immix_space_spec = plan_args + let immix_space_resp = plan_args .global_args .heap - .specify_space(SpaceSpec::DontCare); + .specify_space(VMRequest::Unrestricted); // Spaces will eventually be placed by `BasePlan`. let gen = CommonGenPlan::new(&mut plan_args); let immix_space = ImmixSpace::new( - plan_args.get_space_args("immix_mature", true, immix_space_spec.unwrap()), + plan_args.get_space_args("immix_mature", true, immix_space_resp.unwrap()), ImmixSpaceArgs { reset_log_bit_in_major_gc: false, // We don't need to unlog objects at tracing. Instead, we unlog objects at copying. diff --git a/src/plan/global.rs b/src/plan/global.rs index 21c74c3b5e..97c2ff9df3 100644 --- a/src/plan/global.rs +++ b/src/plan/global.rs @@ -14,7 +14,7 @@ use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::{CopyConfig, GCWorkerCopyContext}; use crate::util::heap::gc_trigger::GCTrigger; -use crate::util::heap::heap_meta::{HeapMeta, SpaceMeta, SpaceSpec}; +use crate::util::heap::heap_meta::{HeapMeta, VMResponse, VMRequest}; use crate::util::heap::layout::Mmapper; use crate::util::heap::layout::VMMap; use crate::util::metadata::side_metadata::SideMetadataSanity; @@ -365,7 +365,7 @@ impl<'a, VM: VMBinding> CreateSpecificPlanArgs<'a, VM> { &mut self, name: &'static str, zeroed: bool, - space_meta: SpaceMeta, + space_meta: VMResponse, ) -> PlanCreateSpaceArgs { PlanCreateSpaceArgs { name, @@ -387,12 +387,12 @@ impl BasePlan { #[allow(unused_mut)] // 'args' only needs to be mutable for certain features pub fn new(args: &mut CreateSpecificPlanArgs) -> BasePlan { #[cfg(feature = "code_space")] - let code_space_meta = args.global_args.heap.specify_space(SpaceSpec::DontCare); + let 
code_space_resp = args.global_args.heap.specify_space(VMRequest::Unrestricted); #[cfg(feature = "code_space")] - let code_lo_space_meta = args.global_args.heap.specify_space(SpaceSpec::DontCare); + let code_lo_space_resp = args.global_args.heap.specify_space(VMRequest::Unrestricted); #[cfg(feature = "ro_space")] - let ro_space_meta = args.global_args.heap.specify_space(SpaceSpec::DontCare); - // NOTE: We don't specify VM space because it doesn't use SpaceMeta anyway. + let ro_space_resp = args.global_args.heap.specify_space(VMRequest::Unrestricted); + // NOTE: We don't specify VM space because it doesn't use any information in `VMResponse`. // BasePlan does not have any nested structs with spaces. We now place spaces. args.global_args.heap.place_spaces(); @@ -402,25 +402,25 @@ impl BasePlan { code_space: ImmortalSpace::new(args.get_space_args( "code_space", true, - code_space_meta.unwrap(), + code_space_resp.unwrap(), )), #[cfg(feature = "code_space")] code_lo_space: ImmortalSpace::new(args.get_space_args( "code_lo_space", true, - code_lo_space_meta.unwrap(), + code_lo_space_resp.unwrap(), )), #[cfg(feature = "ro_space")] ro_space: ImmortalSpace::new(args.get_space_args( "ro_space", true, - ro_space_meta.unwrap(), + ro_space_resp.unwrap(), )), #[cfg(feature = "vm_space")] vm_space: VMSpace::new(args.get_space_args( "vm_space", false, - SpaceMeta::vm_space_dummy(), + VMResponse::vm_space_dummy(), )), global_state: args.global_args.state.clone(), @@ -560,9 +560,9 @@ pub struct CommonPlan { impl CommonPlan { pub fn new(args: &mut CreateSpecificPlanArgs) -> CommonPlan { - let immortal_meta = args.global_args.heap.specify_space(SpaceSpec::DontCare); - let los_meta = args.global_args.heap.specify_space(SpaceSpec::DontCare); - let nonmoving_meta = args.global_args.heap.specify_space(SpaceSpec::DontCare); + let immortal_resp = args.global_args.heap.specify_space(VMRequest::Unrestricted); + let los_resp = args.global_args.heap.specify_space(VMRequest::Unrestricted); + let 
nonmoving_resp = args.global_args.heap.specify_space(VMRequest::Unrestricted); let base = BasePlan::new(args); @@ -570,13 +570,13 @@ impl CommonPlan { immortal: ImmortalSpace::new(args.get_space_args( "immortal", true, - immortal_meta.unwrap(), + immortal_resp.unwrap(), )), - los: LargeObjectSpace::new(args.get_space_args("los", true, los_meta.unwrap()), false), + los: LargeObjectSpace::new(args.get_space_args("los", true, los_resp.unwrap()), false), nonmoving: ImmortalSpace::new(args.get_space_args( "nonmoving", true, - nonmoving_meta.unwrap(), + nonmoving_resp.unwrap(), )), base, } diff --git a/src/plan/immix/global.rs b/src/plan/immix/global.rs index 0e70f0824d..5eca301f8a 100644 --- a/src/plan/immix/global.rs +++ b/src/plan/immix/global.rs @@ -13,7 +13,7 @@ use crate::policy::space::Space; use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::*; -use crate::util::heap::heap_meta::SpaceSpec; +use crate::util::heap::heap_meta::VMRequest; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::vm::VMBinding; use crate::{policy::immix::ImmixSpace, util::opaque_pointer::VMWorkerThread}; @@ -140,17 +140,17 @@ impl Immix { mut plan_args: CreateSpecificPlanArgs, space_args: ImmixSpaceArgs, ) -> Self { - let immix_space_meta = plan_args + let immix_space_resp = plan_args .global_args .heap - .specify_space(SpaceSpec::DontCare); + .specify_space(VMRequest::Unrestricted); // Spaces will eventually be placed by `BasePlan`. 
let common = CommonPlan::new(&mut plan_args); let immix = Immix { immix_space: ImmixSpace::new( - plan_args.get_space_args("immix", true, immix_space_meta.unwrap()), + plan_args.get_space_args("immix", true, immix_space_resp.unwrap()), space_args, ), common, diff --git a/src/plan/markcompact/global.rs b/src/plan/markcompact/global.rs index ed1ef307ce..dbf08bf449 100644 --- a/src/plan/markcompact/global.rs +++ b/src/plan/markcompact/global.rs @@ -15,7 +15,7 @@ use crate::scheduler::gc_work::*; use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::CopySemantics; -use crate::util::heap::heap_meta::SpaceSpec; +use crate::util::heap::heap_meta::VMRequest; use crate::util::metadata::side_metadata::SideMetadataContext; #[cfg(not(feature = "vo_bit"))] use crate::util::metadata::vo_bit::VO_BIT_SIDE_METADATA_SPEC; @@ -192,16 +192,16 @@ impl MarkCompact { global_side_metadata_specs, }; - let mc_space_meta = plan_args + let mc_space_resp = plan_args .global_args .heap - .specify_space(SpaceSpec::DontCare); + .specify_space(VMRequest::Unrestricted); // Spaces will eventually be placed by `BasePlan`. 
let common = CommonPlan::new(&mut plan_args); let mc_space = - MarkCompactSpace::new(plan_args.get_space_args("mc", true, mc_space_meta.unwrap())); + MarkCompactSpace::new(plan_args.get_space_args("mc", true, mc_space_resp.unwrap())); let res = MarkCompact { mc_space, common }; diff --git a/src/plan/marksweep/global.rs b/src/plan/marksweep/global.rs index 79117615e4..54142b0dbb 100644 --- a/src/plan/marksweep/global.rs +++ b/src/plan/marksweep/global.rs @@ -10,7 +10,7 @@ use crate::plan::PlanConstraints; use crate::policy::space::Space; use crate::scheduler::GCWorkScheduler; use crate::util::alloc::allocators::AllocatorSelector; -use crate::util::heap::heap_meta::SpaceSpec; +use crate::util::heap::heap_meta::VMRequest; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::VMWorkerThread; use crate::vm::VMBinding; @@ -102,16 +102,16 @@ impl MarkSweep { global_side_metadata_specs, }; - let ms_meta = plan_args + let ms_resp = plan_args .global_args .heap - .specify_space(SpaceSpec::DontCare); + .specify_space(VMRequest::Unrestricted); // Spaces will eventually be placed by `BasePlan`. 
let common = CommonPlan::new(&mut plan_args); let res = MarkSweep { - ms: MarkSweepSpace::new(plan_args.get_space_args("ms", true, ms_meta.unwrap())), + ms: MarkSweepSpace::new(plan_args.get_space_args("ms", true, ms_resp.unwrap())), common, }; diff --git a/src/plan/nogc/global.rs b/src/plan/nogc/global.rs index c178bdee1a..4629ced8f1 100644 --- a/src/plan/nogc/global.rs +++ b/src/plan/nogc/global.rs @@ -9,7 +9,7 @@ use crate::policy::immortalspace::ImmortalSpace; use crate::policy::space::Space; use crate::scheduler::GCWorkScheduler; use crate::util::alloc::allocators::AllocatorSelector; -use crate::util::heap::heap_meta::SpaceSpec; +use crate::util::heap::heap_meta::VMRequest; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::opaque_pointer::*; use crate::vm::VMBinding; @@ -87,18 +87,18 @@ impl NoGC { global_side_metadata_specs: SideMetadataContext::new_global_specs(&[]), }; - let nogc_space_meta = plan_args + let nogc_space_resp = plan_args .global_args .heap - .specify_space(SpaceSpec::DontCare); - let immortal_meta = plan_args + .specify_space(VMRequest::Unrestricted); + let immortal_resp = plan_args .global_args .heap - .specify_space(SpaceSpec::DontCare); - let los = plan_args + .specify_space(VMRequest::Unrestricted); + let los_resp = plan_args .global_args .heap - .specify_space(SpaceSpec::DontCare); + .specify_space(VMRequest::Unrestricted); // Spaces will eventually be placed by `BasePlan`. 
let base = BasePlan::new(&mut plan_args); @@ -107,14 +107,14 @@ impl NoGC { nogc_space: NoGCImmortalSpace::new(plan_args.get_space_args( "nogc_space", cfg!(not(feature = "nogc_no_zeroing")), - nogc_space_meta.unwrap(), + nogc_space_resp.unwrap(), )), immortal: ImmortalSpace::new(plan_args.get_space_args( "immortal", true, - immortal_meta.unwrap(), + immortal_resp.unwrap(), )), - los: ImmortalSpace::new(plan_args.get_space_args("los", true, los.unwrap())), + los: ImmortalSpace::new(plan_args.get_space_args("los", true, los_resp.unwrap())), base, }; diff --git a/src/plan/pageprotect/global.rs b/src/plan/pageprotect/global.rs index 540a90fa3f..bd51dcde70 100644 --- a/src/plan/pageprotect/global.rs +++ b/src/plan/pageprotect/global.rs @@ -8,7 +8,7 @@ use crate::plan::PlanConstraints; use crate::policy::space::Space; use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; -use crate::util::heap::heap_meta::SpaceSpec; +use crate::util::heap::heap_meta::VMRequest; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::{plan::global::BasePlan, vm::VMBinding}; use crate::{ @@ -97,17 +97,17 @@ impl PageProtect { global_side_metadata_specs: SideMetadataContext::new_global_specs(&[]), }; - let space_meta = plan_args + let space_resp = plan_args .global_args .heap - .specify_space(SpaceSpec::DontCare); + .specify_space(VMRequest::Unrestricted); // Spaces will eventually be placed by `BasePlan`. 
let common = CommonPlan::new(&mut plan_args); let ret = PageProtect { space: LargeObjectSpace::new( - plan_args.get_space_args("pageprotect", true, space_meta.unwrap()), + plan_args.get_space_args("pageprotect", true, space_resp.unwrap()), true, ), common, diff --git a/src/plan/semispace/global.rs b/src/plan/semispace/global.rs index f0122df0d0..9894dc227d 100644 --- a/src/plan/semispace/global.rs +++ b/src/plan/semispace/global.rs @@ -11,7 +11,7 @@ use crate::policy::space::Space; use crate::scheduler::*; use crate::util::alloc::allocators::AllocatorSelector; use crate::util::copy::*; -use crate::util::heap::heap_meta::SpaceSpec; +use crate::util::heap::heap_meta::VMRequest; use crate::util::metadata::side_metadata::SideMetadataContext; use crate::util::opaque_pointer::VMWorkerThread; use crate::{plan::global::BasePlan, vm::VMBinding}; @@ -137,14 +137,14 @@ impl SemiSpace { global_side_metadata_specs: SideMetadataContext::new_global_specs(&[]), }; - let copyspace0_meta = plan_args + let copyspace0_resp = plan_args .global_args .heap - .specify_space(SpaceSpec::DontCare); - let copyspace1_meta = plan_args + .specify_space(VMRequest::Unrestricted); + let copyspace1_resp = plan_args .global_args .heap - .specify_space(SpaceSpec::DontCare); + .specify_space(VMRequest::Unrestricted); // Spaces will eventually be placed by `BasePlan`. 
let common = CommonPlan::new(&mut plan_args); @@ -152,11 +152,11 @@ impl SemiSpace { let res = SemiSpace { hi: AtomicBool::new(false), copyspace0: CopySpace::new( - plan_args.get_space_args("copyspace0", true, copyspace0_meta.unwrap()), + plan_args.get_space_args("copyspace0", true, copyspace0_resp.unwrap()), false, ), copyspace1: CopySpace::new( - plan_args.get_space_args("copyspace1", true, copyspace1_meta.unwrap()), + plan_args.get_space_args("copyspace1", true, copyspace1_resp.unwrap()), true, ), common, diff --git a/src/policy/space.rs b/src/policy/space.rs index 521caae9b9..efb5d0aa8e 100644 --- a/src/policy/space.rs +++ b/src/policy/space.rs @@ -2,7 +2,7 @@ use crate::global_state::GlobalState; use crate::plan::PlanConstraints; use crate::scheduler::GCWorkScheduler; use crate::util::conversions::*; -use crate::util::heap::heap_meta::SpaceMeta; +use crate::util::heap::heap_meta::VMResponse; use crate::util::metadata::side_metadata::{ SideMetadataContext, SideMetadataSanity, SideMetadataSpec, }; @@ -370,7 +370,7 @@ pub(crate) fn print_vm_map( write!(out, "N")?; } write!(out, " ")?; - let SpaceMeta { + let VMResponse { space_id, start, extent, @@ -389,7 +389,7 @@ impl_downcast!(Space where VM: VMBinding); pub struct CommonSpace { pub name: &'static str, pub descriptor: SpaceDescriptor, - pub space_meta: SpaceMeta, + pub space_meta: VMResponse, /// For a copying space that allows sft_trace_object(), this should be set before each GC so we know /// the copy semantics for the space. 
@@ -441,7 +441,7 @@ pub struct PolicyCreateSpaceArgs<'a, VM: VMBinding> { pub struct PlanCreateSpaceArgs<'a, VM: VMBinding> { pub name: &'static str, pub zeroed: bool, - pub space_meta: SpaceMeta, + pub space_meta: VMResponse, pub global_side_metadata_specs: Vec, pub vm_map: &'static dyn VMMap, pub mmapper: &'static dyn Mmapper, @@ -472,7 +472,7 @@ impl<'a, VM: VMBinding> PlanCreateSpaceArgs<'a, VM> { impl CommonSpace { pub fn new(args: PolicyCreateSpaceArgs) -> Self { let space_meta = args.plan_args.space_meta; - let SpaceMeta { + let VMResponse { space_id: _space_id, // TODO: Let SpaceDescriptor use this space_id start, extent, diff --git a/src/util/heap/heap_meta.rs b/src/util/heap/heap_meta.rs index 2f69bd50b2..46a3948df1 100644 --- a/src/util/heap/heap_meta.rs +++ b/src/util/heap/heap_meta.rs @@ -5,18 +5,18 @@ //! It is used as following. //! //! 1. A plan declares all the spaces it wants to create using the `specify_space` method. For -//! each space, it passes a [`SpaceSpec`] which specifies the requirements for each space, +//! each space, it passes a [`VMRequest`] which specifies the requirements for each space, //! including whether the space is contiguous, whether it has a fixed extent, and whether it //! should be place at the low end or high end of the heap range, etc. The `specify_space` -//! method returns a [`FutureSpaceMeta`] for each space which can be used later. +//! method returns a [`PendingVMResponse`] for each space which can be used later. //! 2. After all spaces are specified, the plan calls the `place_spaces` method. It determines //! the locations (starts and extends) and contiguousness of all spaces according to the policy //! specified by [`crate::util::heap::layout::vm_layout::vm_layout`]. -//! 3. Then the plan calls `unwrap()` on each [`FutureSpaceMeta`] to get a [`SpaceMeta`] which +//! 3. Then the plan calls `unwrap()` on each [`PendingVMResponse`] to get a [`VMResponse`] which //! 
holds the the placement decision for each space (start, extent, contiguousness, etc.). //! Using such information, the space can create each concrete spaces. //! -//! In summary, the plan specifies all spaces before `HeapMeta` makes placement decision, and all +//! In summary, the plan specifies all spaces before [`HeapMeta`] makes placement decision, and all //! spaces know their locations the moment they are created. //! //! By doing so, we can avoid creating spaces first and then computing their start addresses and @@ -38,30 +38,36 @@ use crate::util::Address; /// /// TODO: This type needs a better name. pub struct HeapMeta { + /// The start of the heap range (inclusive). heap_start: Address, + /// The end of the heap range (exclusive). heap_limit: Address, + /// The address range for discontiguous spaces (if exists). discontiguous_range: Option>, + /// Request-response pairs for each space. entries: Vec, } -/// A space specification and a "promise" for sending `SpaceMeta` to the user (plan). +/// A request-response pair. struct SpaceEntry { - spec: SpaceSpec, - promise_meta: PromiseSpaceMeta, + req: VMRequest, + resp: PendingVMResponseWriter, } -/// This enum specifies the requirement of space placement. +/// A virtual memory (VM) request specifies the requirement for placing a space in the virtual +/// address space. It will be processed by [`HeapMeta`]. /// -/// Note that the result of space placement (represented by `SpaceMeta`) may give the space a +/// Note that the result of space placement (represented by [`VMResponse`]) may give the space a /// larger address range than requested. For example, on systems with a generous address space, /// the space placement strategy may give each space a contiguous 2TiB address space even if it /// requests a small extent. -pub enum SpaceSpec { +#[derive(Debug)] +pub enum VMRequest { /// There is no size, location, or contiguousness requirement for the space. 
In a confined /// address space, the space may be given a discontiguous address range shared with other /// spaces; in a generous address space, the space may be given a very large contiguous address /// range solely owned by this space. - DontCare, + Unrestricted, /// Require a contiguous range of address of a fixed size. Extent { /// The size of the space, in bytes. Must be a multiple of chunks. @@ -82,23 +88,25 @@ pub enum SpaceSpec { }, } -impl SpaceSpec { - fn dont_care(&self) -> bool { - matches!(self, SpaceSpec::DontCare) +impl VMRequest { + /// Return `true` if the current `VMRequest` is unrestricted. + fn unrestricted(&self) -> bool { + matches!(self, VMRequest::Unrestricted) } + /// Return `true` if the space should be placed at the high end of the address space. fn top(&self) -> bool { match *self { - SpaceSpec::DontCare => false, - SpaceSpec::Extent { top, .. } => top, - SpaceSpec::Fraction { top, .. } => top, + VMRequest::Unrestricted => false, + VMRequest::Extent { top, .. } => top, + VMRequest::Fraction { top, .. } => top, } } } /// This struct represents the placement decision of a space. #[derive(Debug)] -pub struct SpaceMeta { +pub struct VMResponse { /// An assigned ID of the space. Guaranteed to be unique. pub space_id: usize, /// The start of the address range of the space. For discontiguous spaces, this range will be @@ -110,8 +118,8 @@ pub struct SpaceMeta { pub contiguous: bool, } -impl SpaceMeta { - /// Create a dummy `SpaceMeta for `VMSpace` because the address range of `VMSpace` is not +impl VMResponse { + /// Create a dummy `VMResponse` for `VMSpace` because the address range of `VMSpace` is not /// determined by `HeapMeta`. pub(crate) fn vm_space_dummy() -> Self { Self { @@ -123,36 +131,38 @@ impl SpaceMeta { } } -/// A `SpaceMeta` that will be provided in the future. +/// A `VMResponse` that will be provided in the future. 
#[derive(Clone)] -pub struct FutureSpaceMeta { - inner: Rc>>, +pub struct PendingVMResponse { + inner: Rc>>, } -impl FutureSpaceMeta { - /// Unwrap `self` and get a `SpaceMeta` instance. Can only be called after calling +impl PendingVMResponse { + /// Unwrap `self` and get a `VMResponse` instance. Can only be called after calling /// `HeapMeta::place_spaces()`. - pub fn unwrap(self) -> SpaceMeta { + pub fn unwrap(self) -> VMResponse { let mut opt = self.inner.borrow_mut(); opt.take() - .expect("Attempt to get SpaceMeta before calling HeapMeta::place_spaces()") + .expect("Attempt to get VMResponse before calling HeapMeta::place_spaces()") } } -/// The struct for `HeapMeta` to provide a `SpaceMeta` instance for its user. -struct PromiseSpaceMeta { - inner: Rc>>, +/// The struct for `HeapMeta` to provide a `VMResponse` instance for its user. +struct PendingVMResponseWriter { + inner: Rc>>, } -impl PromiseSpaceMeta { - fn provide(&mut self, space_meta: SpaceMeta) { +impl PendingVMResponseWriter { + fn provide(&mut self, resp: VMResponse) { let mut opt = self.inner.borrow_mut(); assert!(opt.is_none()); - *opt = Some(space_meta); + *opt = Some(resp); } } impl HeapMeta { + /// Create a `HeapMeta` instance. The heap range will be determined by + /// [`crate::util::heap::layout::vm_layout::vm_layout`]. pub fn new() -> Self { HeapMeta { heap_start: vm_layout().heap_start, @@ -163,14 +173,14 @@ impl HeapMeta { } /// Declare a space and specify the detailed requirements. 
- pub fn specify_space(&mut self, spec: SpaceSpec) -> FutureSpaceMeta { - let shared_meta = Rc::new(RefCell::new(None)); - let future_meta = FutureSpaceMeta { - inner: shared_meta.clone(), + pub fn specify_space(&mut self, req: VMRequest) -> PendingVMResponse { + let shared_resp = Rc::new(RefCell::new(None)); + let pending_resp = PendingVMResponse { + inner: shared_resp.clone(), }; - let promise_meta = PromiseSpaceMeta { inner: shared_meta }; - self.entries.push(SpaceEntry { spec, promise_meta }); - future_meta + let resp = PendingVMResponseWriter { inner: shared_resp }; + self.entries.push(SpaceEntry { req, resp }); + pending_resp } /// Determine the locations of all specified spaces. @@ -180,35 +190,41 @@ impl HeapMeta { let mut reserver = AddressRangeReserver::new(self.heap_start, self.heap_limit); if force_use_contiguous_spaces { - debug!("Placing spaces in a generous address space"); + debug!( + "Placing spaces in a generous address space: [{}, {})", + self.heap_start, self.heap_limit + ); let extent = vm_layout().max_space_extent(); for (i, entry) in self.entries.iter_mut().enumerate() { - let top = entry.spec.top(); + let top = entry.req.top(); let start = reserver.reserve(extent, top); - let meta = SpaceMeta { + let resp = VMResponse { space_id: i, start, extent, contiguous: true, }; - debug!(" SpaceMeta: {:?}", meta); - entry.promise_meta.provide(meta); + debug!(" VMResponse: {:?}", resp); + entry.resp.provide(resp); } } else { - debug!("Placing spaces in a confined address space"); + debug!( + "Placing spaces in a confined address space: [{}, {})", + self.heap_start, self.heap_limit + ); for (i, entry) in self.entries.iter_mut().enumerate() { - let (start, extent) = match entry.spec { - SpaceSpec::DontCare => continue, - SpaceSpec::Extent { extent, top } => { + let (start, extent) = match entry.req { + VMRequest::Unrestricted => continue, + VMRequest::Extent { extent, top } => { let start = reserver.reserve(extent, top); (start, extent) } - 
SpaceSpec::Fraction { frac, top } => { + VMRequest::Fraction { frac, top } => { // Taken from `crate::policy::space::get_frac_available`, but we currently - // don't have any plans that actually uses it. + // don't have any plans that actually use it. let extent = { trace!("AVAILABLE_START={}", self.heap_start); trace!("AVAILABLE_END={}", self.heap_limit); @@ -227,15 +243,15 @@ impl HeapMeta { } }; - let meta = SpaceMeta { + let resp = VMResponse { space_id: i, start, extent, contiguous: true, }; - debug!(" SpaceMeta: {:?}", meta); - entry.promise_meta.provide(meta); + debug!(" VMResponse: {:?}", resp); + entry.resp.provide(resp); } let discontig_range = reserver.remaining_range(); @@ -252,19 +268,19 @@ impl HeapMeta { let discontig_extent = discontig_end - discontig_start; for (i, entry) in self.entries.iter_mut().enumerate() { - if !entry.spec.dont_care() { + if !entry.req.unrestricted() { continue; } - let meta = SpaceMeta { + let resp = VMResponse { space_id: i, start: discontig_start, extent: discontig_extent, contiguous: false, }; - debug!(" SpaceMeta: {:?}", meta); - entry.promise_meta.provide(meta); + debug!(" VMResponse: {:?}", resp); + entry.resp.provide(resp); } }