diff --git a/src/memory_manager.rs b/src/memory_manager.rs
index 06f3efb6b9..a2d6ceb10c 100644
--- a/src/memory_manager.rs
+++ b/src/memory_manager.rs
@@ -658,8 +658,8 @@ pub fn handle_user_collection_request<VM: VMBinding>(
 ///
 /// Arguments:
 /// * `object`: The object reference to query.
-pub fn is_live_object(object: ObjectReference) -> bool {
-    object.is_live()
+pub fn is_live_object<VM: VMBinding>(mmtk: &MMTK<VM>, object: ObjectReference) -> bool {
+    mmtk.get_plan().is_live_object(object)
 }
 
 /// Check if `addr` is the raw address of an object reference to an MMTk object.
diff --git a/src/mmtk.rs b/src/mmtk.rs
index 7931d18e06..e9089ce31c 100644
--- a/src/mmtk.rs
+++ b/src/mmtk.rs
@@ -23,7 +23,6 @@ use crate::util::sanity::sanity_checker::SanityChecker;
 #[cfg(feature = "extreme_assertions")]
 use crate::util::slot_logger::SlotLogger;
 use crate::util::statistics::stats::Stats;
-use crate::vm::ReferenceGlue;
 use crate::vm::VMBinding;
 use std::cell::UnsafeCell;
 use std::collections::HashMap;
@@ -113,9 +112,8 @@ pub struct MMTK<VM: VMBinding> {
     pub(crate) options: Arc<Options>,
     pub(crate) state: Arc<GlobalState>,
     pub(crate) plan: UnsafeCell<Box<dyn Plan<VM = VM>>>,
-    pub(crate) reference_processors: ReferenceProcessors,
-    pub(crate) finalizable_processor:
-        Mutex<FinalizableProcessor<<VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType>>,
+    pub(crate) reference_processors: ReferenceProcessors<VM>,
+    pub(crate) finalizable_processor: Mutex<FinalizableProcessor<VM>>,
     pub(crate) scheduler: Arc<GCWorkScheduler<VM>>,
     #[cfg(feature = "sanity")]
     pub(crate) sanity_checker: Mutex<SanityChecker<VM::VMSlot>>,
@@ -207,14 +205,14 @@ impl<VM: VMBinding> MMTK<VM> {
             },
         );
 
+        let static_plan: &'static dyn Plan<VM = VM> = unsafe { &*(&*plan as *const _) };
+
         MMTK {
             options,
             state,
             plan: UnsafeCell::new(plan),
-            reference_processors: ReferenceProcessors::new(),
-            finalizable_processor: Mutex::new(FinalizableProcessor::<
-                <VM::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType,
-            >::new()),
+            reference_processors: ReferenceProcessors::new(static_plan),
+            finalizable_processor: Mutex::new(FinalizableProcessor::new(static_plan)),
             scheduler,
             #[cfg(feature = "sanity")]
             sanity_checker: Mutex::new(SanityChecker::new()),
diff --git a/src/plan/generational/copying/global.rs b/src/plan/generational/copying/global.rs
index 66540ffb97..38a48d1f72 100644
--- a/src/plan/generational/copying/global.rs
+++ b/src/plan/generational/copying/global.rs
@@ -158,6 +158,10 @@ impl<VM: VMBinding> Plan for GenCopy<VM> {
     fn generational(&self) -> Option<&dyn GenerationalPlan<VM = Self::VM>> {
         Some(self)
     }
+
+    fn is_live_object(&self, object: ObjectReference) -> bool {
+        self.gen.is_live_object(object)
+    }
 }
 
 impl<VM: VMBinding> GenerationalPlan for GenCopy<VM> {
diff --git a/src/plan/generational/global.rs b/src/plan/generational/global.rs
index 54e94a6334..2df1763442 100644
--- a/src/plan/generational/global.rs
+++ b/src/plan/generational/global.rs
@@ -277,6 +277,22 @@ impl<VM: VMBinding> CommonGenPlan<VM> {
     pub fn get_used_pages(&self) -> usize {
         self.nursery.reserved_pages() + self.common.get_used_pages()
     }
+
+    pub fn is_live_object(&self, object: ObjectReference) -> bool {
+        use crate::policy::sft::SFT;
+        if self.is_current_gc_nursery() {
+            if self.nursery.in_space(object) {
+                self.nursery.is_live(object)
+            } else if self.common.get_los().in_space(object) {
+                self.common.get_los().is_live(object)
+            } else {
+                true
+            }
+        } else {
+            let sft = unsafe { crate::mmtk::SFT_MAP.get_unchecked(object.to_raw_address()) };
+            sft.is_live(object)
+        }
+    }
 }
 
 /// This trait includes methods that are specific to generational plans. This trait needs
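The hunks above make the public liveness query plan-aware: `memory_manager::is_live_object` now takes the `MMTK` instance and delegates to the plan, and `CommonGenPlan` answers nursery GCs precisely (nursery and LOS objects are tested against their own space; everything else is trivially live because it was not a candidate for collection). A minimal sketch of a binding-side call site under the new signature — `MyVM`, `my_binding`, and the weak-table shape are illustrative assumptions, not part of this patch:

```rust
use mmtk::util::ObjectReference;
use mmtk::{memory_manager, MMTK};

// `MyVM` is a hypothetical type implementing `mmtk::vm::VMBinding`.
use my_binding::MyVM;

/// Prune weak-table entries whose targets did not survive the GC that just
/// finished. During a nursery GC, mature objects are now correctly reported
/// live even though no mark bits were set for them in this collection.
fn prune_weak_table(mmtk: &MMTK<MyVM>, weak_table: &mut Vec<ObjectReference>) {
    weak_table.retain(|&obj| memory_manager::is_live_object(mmtk, obj));
}
```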
diff --git a/src/plan/generational/immix/global.rs b/src/plan/generational/immix/global.rs
index 02e9df1b9f..64d31578bc 100644
--- a/src/plan/generational/immix/global.rs
+++ b/src/plan/generational/immix/global.rs
@@ -209,6 +209,10 @@ impl<VM: VMBinding> Plan for GenImmix<VM> {
     fn generational(&self) -> Option<&dyn GenerationalPlan<VM = Self::VM>> {
         Some(self)
     }
+
+    fn is_live_object(&self, object: ObjectReference) -> bool {
+        self.gen.is_live_object(object)
+    }
 }
 
 impl<VM: VMBinding> GenerationalPlan for GenImmix<VM> {
diff --git a/src/plan/global.rs b/src/plan/global.rs
index 5cb6ce9bb9..021ed022d8 100644
--- a/src/plan/global.rs
+++ b/src/plan/global.rs
@@ -358,6 +358,11 @@ pub trait Plan: 'static + HasSpaces + Sync + Downcast {
             space.verify_side_metadata_sanity(&mut side_metadata_sanity_checker);
         })
     }
+
+    fn is_live_object(&self, object: ObjectReference) -> bool {
+        let sft = unsafe { crate::mmtk::SFT_MAP.get_unchecked(object.to_raw_address()) };
+        sft.is_live(object)
+    }
 }
 
 impl_downcast!(Plan assoc VM);
diff --git a/src/plan/sticky/immix/global.rs b/src/plan/sticky/immix/global.rs
index a3cac41c61..56db34d3c4 100644
--- a/src/plan/sticky/immix/global.rs
+++ b/src/plan/sticky/immix/global.rs
@@ -18,6 +18,7 @@ use crate::util::heap::gc_trigger::SpaceStats;
 use crate::util::metadata::log_bit::UnlogBitsOperation;
 use crate::util::metadata::side_metadata::SideMetadataContext;
 use crate::util::statistics::counter::EventCounter;
+use crate::util::ObjectReference;
 use crate::vm::ObjectModel;
 use crate::vm::VMBinding;
 use crate::Plan;
@@ -224,6 +225,22 @@ impl<VM: VMBinding> Plan for StickyImmix<VM> {
         }
         true
     }
+
+    fn is_live_object(&self, object: ObjectReference) -> bool {
+        use crate::policy::sft::SFT;
+        if self.is_current_gc_nursery() {
+            if self.immix.immix_space.in_space(object) {
+                self.immix.immix_space.is_live(object)
+            } else if self.immix.common().get_los().in_space(object) {
+                self.immix.common().get_los().is_live(object)
+            } else {
+                true
+            }
+        } else {
+            let sft = unsafe { crate::mmtk::SFT_MAP.get_unchecked(object.to_raw_address()) };
+            sft.is_live(object)
+        }
+    }
 }
 
 impl<VM: VMBinding> GenerationalPlan for StickyImmix<VM> {
diff --git a/src/policy/immix/immixspace.rs b/src/policy/immix/immixspace.rs
index 08805df817..0dec9588ca 100644
--- a/src/policy/immix/immixspace.rs
+++ b/src/policy/immix/immixspace.rs
@@ -742,7 +742,7 @@ impl<VM: VMBinding> ImmixSpace<VM> {
             );
 
             queue.enqueue(new_object);
-            debug_assert!(new_object.is_live());
+            debug_assert!(new_object.is_reachable());
             new_object
         }
     }
diff --git a/src/policy/space.rs b/src/policy/space.rs
index 4582fcfb3f..3c2f4b2ae3 100644
--- a/src/policy/space.rs
+++ b/src/policy/space.rs
@@ -784,7 +784,7 @@ impl<VM: VMBinding> CommonSpace<VM> {
                 VM::VMObjectModel::GLOBAL_LOG_BIT_SPEC.is_unlogged::<VM>(object, Ordering::Relaxed),
             );
         }
-        println!("is live = {}", object.is_live());
+        println!("is reachable = {}", object.is_reachable());
     }
 }
diff --git a/src/util/address.rs b/src/util/address.rs
index c87a5d3abb..032840b1d2 100644
--- a/src/util/address.rs
+++ b/src/util/address.rs
@@ -674,11 +674,6 @@ impl ObjectReference {
         unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.is_reachable(self)
     }
 
-    /// Is the object live, determined by the policy?
-    pub fn is_live(self) -> bool {
-        unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.is_live(self)
-    }
-
     /// Can the object be moved?
     pub fn is_movable(self) -> bool {
         unsafe { SFT_MAP.get_unchecked(self.to_raw_address()) }.is_movable()
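With `ObjectReference::is_live` removed in the hunk above, the only per-object query left on `ObjectReference` is `is_reachable`, which is still a raw SFT lookup and only meaningful for spaces that were actually collected; that is why the two debug sites above switched to it. Anything needing a GC-wide answer now goes through a plan, as in this illustrative helper (not part of the patch):

```rust
use mmtk::plan::Plan;
use mmtk::util::ObjectReference;
use mmtk::vm::VMBinding;

/// Illustrative only: plan-aware liveness is the cross-space query. During a
/// nursery GC, asking the SFT of an uncollected mature space would wrongly
/// report unmarked (but live) objects as dead; `Plan::is_live_object` instead
/// treats objects outside the collected spaces as live.
fn survived_this_gc<VM: VMBinding>(plan: &dyn Plan<VM = VM>, object: ObjectReference) -> bool {
    plan.is_live_object(object)
}
```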
diff --git a/src/util/finalizable_processor.rs b/src/util/finalizable_processor.rs
index 41666088a0..66e43e2d5d 100644
--- a/src/util/finalizable_processor.rs
+++ b/src/util/finalizable_processor.rs
@@ -5,43 +5,51 @@ use crate::util::reference_processor::RescanReferences;
 use crate::util::ObjectReference;
 use crate::util::VMWorkerThread;
 use crate::vm::Finalizable;
-use crate::vm::{Collection, VMBinding};
+use crate::vm::{Collection, ReferenceGlue, VMBinding};
 use crate::MMTK;
 use std::marker::PhantomData;
 
+type F<VM> = <<VM as VMBinding>::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType;
+
 /// A special processor for Finalizable objects.
 // TODO: we should consider if we want to merge FinalizableProcessor with ReferenceProcessor,
 // and treat final reference as a special reference type in ReferenceProcessor.
-#[derive(Default)]
-pub struct FinalizableProcessor<F: Finalizable> {
+pub struct FinalizableProcessor<VM: VMBinding> {
+    plan: &'static dyn crate::plan::Plan<VM = VM>,
     /// Candidate objects that has finalizers with them
-    candidates: Vec<F>,
+    candidates: Vec<F<VM>>,
     /// Index into candidates to record where we are up to in the last scan of the candidates.
     /// Index after nursery_index are new objects inserted after the last GC.
     nursery_index: usize,
     /// Objects that can be finalized. They are actually dead, but we keep them alive
     /// until the binding pops them from the queue.
-    ready_for_finalize: Vec<F>,
+    ready_for_finalize: Vec<F<VM>>,
 }
 
-impl<F: Finalizable> FinalizableProcessor<F> {
-    pub fn new() -> Self {
+impl<VM: VMBinding> FinalizableProcessor<VM> {
+    pub fn new(plan: &'static dyn crate::plan::Plan<VM = VM>) -> Self {
         Self {
+            plan,
             candidates: vec![],
             nursery_index: 0,
             ready_for_finalize: vec![],
         }
     }
 
-    pub fn add(&mut self, object: F) {
+    pub fn add(&mut self, object: F<VM>) {
         self.candidates.push(object);
     }
 
-    fn forward_finalizable_reference<E: ProcessEdgesWork>(e: &mut E, finalizable: &mut F) {
+    fn forward_finalizable_reference<E: ProcessEdgesWork>(e: &mut E, finalizable: &mut F<VM>) {
         finalizable.keep_alive::<E>(e);
     }
 
-    pub fn scan<E: ProcessEdgesWork>(&mut self, tls: VMWorkerThread, e: &mut E, nursery: bool) {
+    pub fn scan<E: ProcessEdgesWork<VM = VM>>(
+        &mut self,
+        tls: VMWorkerThread,
+        e: &mut E,
+        nursery: bool,
+    ) {
         let start = if nursery { self.nursery_index } else { 0 };
 
         // We should go through ready_for_finalize objects and keep them alive.
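For readability, the `F<VM>` alias introduced at the top of this file is what lets the processor be generic over the whole binding rather than over a free `F: Finalizable` parameter, which in turn is what makes the stored `plan` pointer possible. Restated as a standalone sketch, with hypothetical binding-side names in the comment:

```rust
use mmtk::vm::{ReferenceGlue, VMBinding};

// The alias from the hunk above: the element type is whatever the
// binding's reference glue declares as its finalizable type.
type F<VM> = <<VM as VMBinding>::VMReferenceGlue as ReferenceGlue<VM>>::FinalizableType;

// So for a binding whose glue declares (illustrative names):
//     impl ReferenceGlue<MyVM> for MyReferenceGlue {
//         type FinalizableType = MyFinalizable;
//         ...
//     }
// `FinalizableProcessor<MyVM>` stores `Vec<MyFinalizable>` internally.
```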
@@ -51,11 +59,11 @@ impl<F: Finalizable> FinalizableProcessor<F> {
         self.candidates.append(&mut self.ready_for_finalize);
         debug_assert!(self.ready_for_finalize.is_empty());
 
-        for mut f in self.candidates.drain(start..).collect::<Vec<F>>() {
+        for mut f in self.candidates.drain(start..).collect::<Vec<F<VM>>>() {
             let reff = f.get_reference();
             trace!("Pop {:?} for finalization", reff);
-            if reff.is_live() {
-                FinalizableProcessor::<F>::forward_finalizable_reference(e, &mut f);
+            if self.plan.is_live_object(reff) {
+                FinalizableProcessor::<VM>::forward_finalizable_reference(e, &mut f);
                 trace!("{:?} is live, push {:?} back to candidates", reff, f);
                 self.candidates.push(f);
                 continue;
@@ -79,25 +87,25 @@ impl<F: Finalizable> FinalizableProcessor<F> {
         <<E as ProcessEdgesWork>::VM as VMBinding>::VMCollection::schedule_finalization(tls);
     }
 
-    pub fn forward_candidate<E: ProcessEdgesWork>(&mut self, e: &mut E, _nursery: bool) {
+    pub fn forward_candidate<E: ProcessEdgesWork<VM = VM>>(&mut self, e: &mut E, _nursery: bool) {
         self.candidates
             .iter_mut()
-            .for_each(|f| FinalizableProcessor::<F>::forward_finalizable_reference(e, f));
+            .for_each(|f| FinalizableProcessor::<VM>::forward_finalizable_reference(e, f));
         e.flush();
     }
 
-    pub fn forward_finalizable<E: ProcessEdgesWork>(&mut self, e: &mut E, _nursery: bool) {
+    pub fn forward_finalizable<E: ProcessEdgesWork<VM = VM>>(&mut self, e: &mut E, _nursery: bool) {
         self.ready_for_finalize
             .iter_mut()
-            .for_each(|f| FinalizableProcessor::<F>::forward_finalizable_reference(e, f));
+            .for_each(|f| FinalizableProcessor::<VM>::forward_finalizable_reference(e, f));
         e.flush();
     }
 
-    pub fn get_ready_object(&mut self) -> Option<F> {
+    pub fn get_ready_object(&mut self) -> Option<F<VM>> {
         self.ready_for_finalize.pop()
     }
 
-    pub fn get_all_finalizers(&mut self) -> Vec<F> {
+    pub fn get_all_finalizers(&mut self) -> Vec<F<VM>> {
         let mut ret = std::mem::take(&mut self.candidates);
         let ready_objects = std::mem::take(&mut self.ready_for_finalize);
         ret.extend(ready_objects);
@@ -108,12 +116,12 @@ impl<F: Finalizable> FinalizableProcessor<F> {
         ret
     }
 
-    pub fn get_finalizers_for(&mut self, object: ObjectReference) -> Vec<F> {
+    pub fn get_finalizers_for(&mut self, object: ObjectReference) -> Vec<F<VM>> {
         // Drain filter for finalizers that equal to 'object':
         // * for elements that equal to 'object', they will be removed from the original vec, and returned.
         // * for elements that do not equal to 'object', they will be left in the original vec.
         // TODO: We should replace this with `vec.drain_filter()` when it is stablized.
-        let drain_filter = |vec: &mut Vec<F>| -> Vec<F> {
+        let drain_filter = |vec: &mut Vec<F<VM>>| -> Vec<F<VM>> {
             let mut i = 0;
             let mut ret = vec![];
             while i < vec.len() {
@@ -126,7 +134,7 @@ impl<F: Finalizable> FinalizableProcessor<F> {
             }
             ret
         };
-        let mut ret: Vec<F> = drain_filter(&mut self.candidates);
+        let mut ret: Vec<F<VM>> = drain_filter(&mut self.candidates);
         ret.extend(drain_filter(&mut self.ready_for_finalize));
 
         // We removed objects from candidates. Reset nursery_index
diff --git a/src/util/reference_processor.rs b/src/util/reference_processor.rs
index a5be7d3d4d..5bc2225638 100644
--- a/src/util/reference_processor.rs
+++ b/src/util/reference_processor.rs
@@ -5,6 +5,7 @@ use std::sync::Mutex;
 use std::vec::Vec;
 
 use crate::plan::is_nursery_gc;
+use crate::plan::Plan;
 use crate::scheduler::ProcessEdgesWork;
 use crate::scheduler::WorkBucketStage;
 use crate::util::ObjectReference;
@@ -15,22 +16,22 @@ use crate::vm::VMBinding;
 /// Holds all reference processors for each weak reference Semantics.
 /// Currently this is based on Java's weak reference semantics (soft/weak/phantom).
 /// We should make changes to make this general rather than Java specific.
-pub struct ReferenceProcessors {
-    soft: ReferenceProcessor,
-    weak: ReferenceProcessor,
-    phantom: ReferenceProcessor,
+pub struct ReferenceProcessors<VM: VMBinding> {
+    soft: ReferenceProcessor<VM>,
+    weak: ReferenceProcessor<VM>,
+    phantom: ReferenceProcessor<VM>,
 }
 
-impl ReferenceProcessors {
-    pub fn new() -> Self {
+impl<VM: VMBinding> ReferenceProcessors<VM> {
+    pub fn new(plan: &'static dyn Plan<VM = VM>) -> Self {
         ReferenceProcessors {
-            soft: ReferenceProcessor::new(Semantics::SOFT),
-            weak: ReferenceProcessor::new(Semantics::WEAK),
-            phantom: ReferenceProcessor::new(Semantics::PHANTOM),
+            soft: ReferenceProcessor::new(plan, Semantics::SOFT),
+            weak: ReferenceProcessor::new(plan, Semantics::WEAK),
+            phantom: ReferenceProcessor::new(plan, Semantics::PHANTOM),
         }
     }
 
-    pub fn get(&self, semantics: Semantics) -> &ReferenceProcessor {
+    pub fn get(&self, semantics: Semantics) -> &ReferenceProcessor<VM> {
         match semantics {
             Semantics::SOFT => &self.soft,
             Semantics::WEAK => &self.weak,
@@ -56,10 +57,10 @@ impl ReferenceProcessors {
     /// This will invoke enqueue for each reference processor, which will
     /// call back to the VM to enqueue references whose referents are cleared
     /// in this GC.
-    pub fn enqueue_refs<VM: VMBinding>(&self, tls: VMWorkerThread) {
-        self.soft.enqueue::<VM>(tls);
-        self.weak.enqueue::<VM>(tls);
-        self.phantom.enqueue::<VM>(tls);
+    pub fn enqueue_refs(&self, tls: VMWorkerThread) {
+        self.soft.enqueue(tls);
+        self.weak.enqueue(tls);
+        self.phantom.enqueue(tls);
     }
 
     /// A separate reference forwarding step. Normally when we scan refs, we deal with forwarding.
@@ -86,25 +87,19 @@ impl ReferenceProcessors {
     }
 
     /// Scan soft references.
-    pub fn scan_soft_refs<E: ProcessEdgesWork>(&self, mmtk: &'static MMTK<E::VM>) {
+    pub fn scan_soft_refs(&self, mmtk: &'static MMTK<VM>) {
         // This will update the references (and the referents).
-        self.soft.scan::<E>(is_nursery_gc(mmtk.get_plan()));
+        self.soft.scan(is_nursery_gc(mmtk.get_plan()));
     }
 
     /// Scan weak references.
-    pub fn scan_weak_refs<E: ProcessEdgesWork>(&self, mmtk: &'static MMTK<E::VM>) {
-        self.weak.scan::<E>(is_nursery_gc(mmtk.get_plan()));
+    pub fn scan_weak_refs(&self, mmtk: &'static MMTK<VM>) {
+        self.weak.scan(is_nursery_gc(mmtk.get_plan()));
     }
 
     /// Scan phantom references.
-    pub fn scan_phantom_refs<E: ProcessEdgesWork>(&self, mmtk: &'static MMTK<E::VM>) {
-        self.phantom.scan::<E>(is_nursery_gc(mmtk.get_plan()));
-    }
-}
-
-impl Default for ReferenceProcessors {
-    fn default() -> Self {
-        Self::new()
+    pub fn scan_phantom_refs(&self, mmtk: &'static MMTK<VM>) {
+        self.phantom.scan(is_nursery_gc(mmtk.get_plan()));
     }
 }
 
@@ -124,7 +119,8 @@ const INITIAL_SIZE: usize = 256;
 /// 2. We scan references after the GC determins liveness.
 /// 3. We forward references if the GC needs forwarding after liveness.
 /// 4. We inform the binding of references whose referents are cleared during this GC by enqueue'ing.
-pub struct ReferenceProcessor {
+pub struct ReferenceProcessor<VM: VMBinding> {
+    plan: &'static dyn Plan<VM = VM>,
     /// Most of the reference processor is protected by a mutex.
     sync: Mutex<ReferenceProcessorSync>,
 
@@ -169,9 +165,10 @@ struct ReferenceProcessorSync {
     nursery_index: usize,
 }
 
-impl ReferenceProcessor {
-    pub fn new(semantics: Semantics) -> Self {
+impl<VM: VMBinding> ReferenceProcessor<VM> {
+    pub fn new(plan: &'static dyn Plan<VM = VM>, semantics: Semantics) -> Self {
         ReferenceProcessor {
+            plan,
             sync: Mutex::new(ReferenceProcessorSync {
                 references: HashSet::with_capacity(INITIAL_SIZE),
                 enqueued_references: vec![],
@@ -208,16 +205,16 @@ impl ReferenceProcessor {
 
     /// Return the new `ObjectReference` of a referent if it is already moved, or its current
     /// `ObjectReference` otherwise. The referent must be live when calling this function.
-    fn get_forwarded_referent(referent: ObjectReference) -> ObjectReference {
-        debug_assert!(referent.is_live());
+    fn get_forwarded_referent(&self, referent: ObjectReference) -> ObjectReference {
+        debug_assert!(self.plan.is_live_object(referent));
         referent.get_forwarded_object().unwrap_or(referent)
     }
 
     /// Return the new `ObjectReference` of a reference object if it is already moved, or its
     /// current `ObjectReference` otherwise. The reference object must be live when calling this
     /// function.
-    fn get_forwarded_reference(object: ObjectReference) -> ObjectReference {
-        debug_assert!(object.is_live());
+    fn get_forwarded_reference(&self, object: ObjectReference) -> ObjectReference {
+        debug_assert!(self.plan.is_live_object(object));
         object.get_forwarded_object().unwrap_or(object)
     }
 
@@ -251,7 +248,7 @@ impl ReferenceProcessor {
     }
 
     /// Inform the binding to enqueue the weak references whose referents were cleared in this GC.
-    pub fn enqueue<VM: VMBinding>(&self, tls: VMWorkerThread) {
+    pub fn enqueue(&self, tls: VMWorkerThread) {
         // We will acquire a lock below. If anyone tries to insert new weak refs which will acquire the same lock, a deadlock will occur.
         // This does happen for OpenJDK with ConcurrentImmix where a write barrier is triggered during the enqueueing of weak references,
         // and the write barrier scans the objects and attempts to add new weak references.
@@ -291,6 +288,38 @@ impl ReferenceProcessor {
         self.allow_new_candidate();
     }
 
+    // Forward a single reference
+    fn forward_reference<E: ProcessEdgesWork>(
+        &self,
+        trace: &mut E,
+        reference: ObjectReference,
+    ) -> ObjectReference {
+        {
+            use crate::vm::ObjectModel;
+            trace!(
+                "Forwarding reference: {} (size: {})",
+                reference,
+                <E::VM as VMBinding>::VMObjectModel::get_current_size(reference)
+            );
+        }
+
+        if let Some(old_referent) = <E::VM as VMBinding>::VMReferenceGlue::get_referent(reference) {
+            let new_referent = Self::trace_forward_object(trace, old_referent);
+            <E::VM as VMBinding>::VMReferenceGlue::set_referent(reference, new_referent);
+
+            trace!(
+                " referent: {} (forwarded to {})",
+                old_referent,
+                new_referent
+            );
+        }
+
+        let new_reference = Self::trace_forward_object(trace, reference);
+        trace!(" reference: forwarded to {}", new_reference);
+
+        new_reference
+    }
+
     /// Forward the reference tables in the reference processor. This is only needed if a plan does not forward
     /// objects in their first transitive closure.
     /// nursery is not used for this.
@@ -298,49 +327,16 @@ impl ReferenceProcessor {
         let mut sync = self.sync.lock().unwrap();
         debug!("Starting ReferenceProcessor.forward({:?})", self.semantics);
 
-        // Forward a single reference
-        fn forward_reference<E: ProcessEdgesWork>(
-            trace: &mut E,
-            reference: ObjectReference,
-        ) -> ObjectReference {
-            {
-                use crate::vm::ObjectModel;
-                trace!(
-                    "Forwarding reference: {} (size: {})",
-                    reference,
-                    <E::VM as VMBinding>::VMObjectModel::get_current_size(reference)
-                );
-            }
-
-            if let Some(old_referent) =
-                <E::VM as VMBinding>::VMReferenceGlue::get_referent(reference)
-            {
-                let new_referent = ReferenceProcessor::trace_forward_object(trace, old_referent);
-                <E::VM as VMBinding>::VMReferenceGlue::set_referent(reference, new_referent);
-
-                trace!(
-                    " referent: {} (forwarded to {})",
-                    old_referent,
-                    new_referent
-                );
-            }
-
-            let new_reference = ReferenceProcessor::trace_forward_object(trace, reference);
-            trace!(" reference: forwarded to {}", new_reference);
-
-            new_reference
-        }
-
         sync.references = sync
             .references
             .iter()
-            .map(|reff| forward_reference::<E>(trace, *reff))
+            .map(|reff| self.forward_reference::<E>(trace, *reff))
            .collect();
         sync.enqueued_references = sync
             .enqueued_references
             .iter()
-            .map(|reff| forward_reference::<E>(trace, *reff))
+            .map(|reff| self.forward_reference::<E>(trace, *reff))
             .collect();
 
         debug!("Ending ReferenceProcessor.forward({:?})", self.semantics);
@@ -354,7 +350,7 @@ impl ReferenceProcessor {
     // TODO: nursery is currently ignored. We used to use Vec for the reference table, and use an int
     // to point to the reference that we last scanned. However, when we use HashSet for reference table,
     // we can no longer do that.
-    fn scan<E: ProcessEdgesWork>(&self, _nursery: bool) {
+    fn scan(&self, _nursery: bool) {
         let mut sync = self.sync.lock().unwrap();
 
         debug!("Starting ReferenceProcessor.scan({:?})", self.semantics);
@@ -373,7 +369,7 @@ impl ReferenceProcessor {
         let new_set: HashSet<ObjectReference> = sync
             .references
             .iter()
-            .filter_map(|reff| self.process_reference::<E::VM>(*reff, &mut enqueued_references))
+            .filter_map(|reff| self.process_reference(*reff, &mut enqueued_references))
             .collect();
 
         let num_old = sync.references.len();
@@ -425,7 +421,7 @@ impl ReferenceProcessor {
         for reference in sync.references.iter() {
             trace!("Processing reference: {:?}", reference);
 
-            if !reference.is_live() {
+            if !self.plan.is_live_object(*reference) {
                 // Reference is currently unreachable but may get reachable by the
                 // following trace. We postpone the decision.
                 continue;
@@ -452,7 +448,7 @@ impl ReferenceProcessor {
     ///
     /// If a None value is returned, the reference can be removed from the reference table. Otherwise, the updated reference should be kept
     /// in the reference table.
-    fn process_reference<VM: VMBinding>(
+    fn process_reference(
         &self,
         reference: ObjectReference,
         enqueued_references: &mut Vec<ObjectReference>,
@@ -461,14 +457,14 @@ impl ReferenceProcessor {
 
         // If the reference is dead, we're done with it. Let it (and
         // possibly its referent) be garbage-collected.
-        if !reference.is_live() {
+        if !self.plan.is_live_object(reference) {
            VM::VMReferenceGlue::clear_referent(reference);
             trace!(" UNREACHABLE reference: {}", reference);
             return None;
         }
 
         // The reference object is live.
-        let new_reference = Self::get_forwarded_reference(reference);
+        let new_reference = self.get_forwarded_reference(reference);
         trace!(" forwarded to: {}", new_reference);
 
         // Get the old referent.
@@ -484,11 +480,11 @@ impl ReferenceProcessor {
             return None;
         };
 
-        if old_referent.is_live() {
+        if self.plan.is_live_object(old_referent) {
             // Referent is still reachable in a way that is as strong as
             // or stronger than the current reference level.
-            let new_referent = Self::get_forwarded_referent(old_referent);
-            debug_assert!(new_referent.is_live());
+            let new_referent = self.get_forwarded_referent(old_referent);
+            debug_assert!(self.plan.is_live_object(new_referent));
             trace!(" forwarded referent to: {}", new_referent);
 
             // The reference object stays on the waiting list, and the
@@ -612,7 +608,7 @@ impl<E: ProcessEdgesWork> RefForwarding<E> {
 
 pub(crate) struct RefEnqueue<VM: VMBinding>(PhantomData<VM>);
 
 impl<VM: VMBinding> GCWork<VM> for RefEnqueue<VM> {
     fn do_work(&mut self, worker: &mut GCWorker<VM>, mmtk: &'static MMTK<VM>) {
-        mmtk.reference_processors.enqueue_refs::<VM>(worker.tls);
+        mmtk.reference_processors.enqueue_refs(worker.tls);
     }
 }
 
 impl<VM: VMBinding> RefEnqueue<VM> {
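Taken together, the binding-facing finalization flow is unchanged by this patch: candidates registered through `memory_manager::add_finalizer` are now tested with the plan-aware `is_live_object` during `scan`, and dead ones still surface through the existing queue API. A hedged sketch of the consuming side — `MyVM`, `my_binding`, and `run_finalizer` are illustrative assumptions:

```rust
use mmtk::{memory_manager, MMTK};

// Hypothetical binding type and finalizer hook; only the
// `memory_manager::get_finalized_object` call is MMTk API.
use my_binding::{run_finalizer, MyVM};

/// Called by the VM after a GC cycle: drain every finalizable object whose
/// plan-aware liveness check failed during `FinalizableProcessor::scan`.
fn drain_finalization_queue(mmtk: &'static MMTK<MyVM>) {
    while let Some(finalizable) = memory_manager::get_finalized_object(mmtk) {
        run_finalizer(finalizable);
    }
}
```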