
Commit de79b1e

Handle mmap failure properly (#307)
1 parent 5906d0d commit de79b1e

9 files changed: +154 -55 lines changed

src/policy/space.rs (+6 -2)

@@ -269,11 +269,15 @@ pub trait Space<VM: VMBinding>: 'static + SFT + Sync + Downcast {
         // adding a space lock here.
         let bytes = conversions::pages_to_bytes(res.pages);
         self.grow_space(res.start, bytes, res.new_chunk);
-        self.common().mmapper.ensure_mapped(
+        // Mmap the pages and handle error. In case of any error,
+        // we will either call back to the VM for OOM, or simply panic.
+        if let Err(mmap_error) = self.common().mmapper.ensure_mapped(
             res.start,
             res.pages,
             &self.common().metadata,
-        );
+        ) {
+            memory::handle_mmap_error::<VM>(mmap_error, tls);
+        }
 
         // TODO: Concurrent zeroing
         if self.common().zeroed {
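
This call site compiles without needing a value from the error branch because `handle_mmap_error` diverges (its return type is `!`). A minimal sketch of the same control flow, where the `_stub` functions are hypothetical stand-ins for the real mmapper call and handler:

use std::io::{Error, ErrorKind, Result};

// Hypothetical stand-in for Mmapper::ensure_mapped.
fn ensure_mapped_stub() -> Result<()> {
    Err(Error::new(ErrorKind::Other, "mmap failed"))
}

// Hypothetical stand-in for memory::handle_mmap_error. Like the real
// handler it diverges: it reports OOM to the VM or panics, so control
// never falls out of the error branch.
fn handle_mmap_error_stub(error: Error) -> ! {
    panic!("Unexpected mmap failure: {:?}", error)
}

fn acquire_pages_sketch() {
    if let Err(mmap_error) = ensure_mapped_stub() {
        handle_mmap_error_stub(mmap_error);
    }
    // The success path can assume the pages are mapped from here on.
}

Because the handler never returns, the code after the `if let` (zeroing, in the real function) runs only when the mapping succeeded.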

src/util/heap/layout/byte_map_mmapper.rs (+31 -28)

@@ -11,6 +11,7 @@ use std::sync::atomic::Ordering;
 use std::sync::Mutex;
 
 use crate::util::memory::{dzmmap_noreplace, mprotect, munprotect};
+use std::io::Result;
 use std::mem::transmute;
 
 const UNMAPPED: u8 = 0;
@@ -48,7 +49,7 @@ impl Mmapper for ByteMapMmapper {
         }
     }
 
-    fn ensure_mapped(&self, start: Address, pages: usize, metadata: &SideMetadata) {
+    fn ensure_mapped(&self, start: Address, pages: usize, metadata: &SideMetadata) -> Result<()> {
         let start_chunk = Self::address_to_mmap_chunks_down(start);
         let end_chunk = Self::address_to_mmap_chunks_up(start + pages_to_bytes(pages));
         trace!(
@@ -68,27 +69,15 @@
             let guard = self.lock.lock().unwrap();
             // might have become MAPPED here
             if self.mapped[chunk].load(Ordering::Relaxed) == UNMAPPED {
-                match dzmmap_noreplace(mmap_start, MMAP_CHUNK_BYTES) {
-                    Ok(_) => {
-                        self.map_metadata(mmap_start, metadata)
-                            .expect("failed to map metadata memory");
-                        if VERBOSE {
-                            trace!(
-                                "mmap succeeded at chunk {} {} with len = {}",
-                                chunk,
-                                mmap_start,
-                                MMAP_CHUNK_BYTES
-                            );
-                        }
-                    }
-                    Err(e) => {
-                        drop(guard);
-                        panic!(
-                            "ensureMapped failed on address {}\n\
-                             Can't get more space with mmap(): {}",
-                            mmap_start, e
-                        );
-                    }
+                // map data
+                let res = dzmmap_noreplace(mmap_start, MMAP_CHUNK_BYTES);
+                if res.is_err() {
+                    return res;
+                }
+                // map metadata
+                let res = self.map_metadata(mmap_start, metadata);
+                if res.is_err() {
+                    return res;
                 }
             }
 
@@ -114,6 +103,8 @@
             self.mapped[chunk].store(MAPPED, Ordering::Relaxed);
             drop(guard);
         }
+
+        Ok(())
     }
 
     /**
@@ -268,7 +259,9 @@
             || {
                 let mmapper = ByteMapMmapper::new();
                 let pages = 1;
-                mmapper.ensure_mapped(FIXED_ADDRESS, pages, &NO_METADATA);
+                mmapper
+                    .ensure_mapped(FIXED_ADDRESS, pages, &NO_METADATA)
+                    .unwrap();
 
                 let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
                 let end_chunk = ByteMapMmapper::address_to_mmap_chunks_up(
@@ -292,7 +285,9 @@
             || {
                 let mmapper = ByteMapMmapper::new();
                 let pages = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
-                mmapper.ensure_mapped(FIXED_ADDRESS, pages, &NO_METADATA);
+                mmapper
+                    .ensure_mapped(FIXED_ADDRESS, pages, &NO_METADATA)
+                    .unwrap();
 
                 let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
                 let end_chunk = ByteMapMmapper::address_to_mmap_chunks_up(
@@ -317,7 +312,9 @@
                 let mmapper = ByteMapMmapper::new();
                 let pages =
                     (MMAP_CHUNK_BYTES + MMAP_CHUNK_BYTES / 2) >> LOG_BYTES_IN_PAGE as usize;
-                mmapper.ensure_mapped(FIXED_ADDRESS, pages, &NO_METADATA);
+                mmapper
+                    .ensure_mapped(FIXED_ADDRESS, pages, &NO_METADATA)
+                    .unwrap();
 
                 let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
                 let end_chunk = ByteMapMmapper::address_to_mmap_chunks_up(
@@ -343,7 +340,9 @@
                 // map 2 chunks
                 let mmapper = ByteMapMmapper::new();
                 let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
-                mmapper.ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &NO_METADATA);
+                mmapper
+                    .ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &NO_METADATA)
+                    .unwrap();
 
                 // protect 1 chunk
                 mmapper.protect(FIXED_ADDRESS, pages_per_chunk);
@@ -367,7 +366,9 @@
                 // map 2 chunks
                 let mmapper = ByteMapMmapper::new();
                 let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
-                mmapper.ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &NO_METADATA);
+                mmapper
+                    .ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &NO_METADATA)
+                    .unwrap();
 
                 // protect 1 chunk
                 mmapper.protect(FIXED_ADDRESS, pages_per_chunk);
@@ -377,7 +378,9 @@
                 assert_eq!(mmapper.mapped[chunk + 1].load(Ordering::Relaxed), MAPPED);
 
                 // ensure mapped - this will unprotect the previously protected chunk
-                mmapper.ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &NO_METADATA);
+                mmapper
+                    .ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &NO_METADATA)
+                    .unwrap();
                 assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), MAPPED);
                 assert_eq!(mmapper.mapped[chunk + 1].load(Ordering::Relaxed), MAPPED);
             },
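
Since `ensure_mapped` now returns `std::io::Result<()>`, the repeated `if res.is_err() { return res; }` blocks above could equally be written with the `?` operator, which performs the same early return. A sketch with hypothetical stubs:

use std::io::Result;

// Hypothetical stand-ins for dzmmap_noreplace and map_metadata.
fn dzmmap_noreplace_stub() -> Result<()> { Ok(()) }
fn map_metadata_stub() -> Result<()> { Ok(()) }

// Same control flow as the explicit checks in the patch: `?` returns
// any Err to the caller and otherwise continues.
fn map_chunk_sketch() -> Result<()> {
    dzmmap_noreplace_stub()?; // map data
    map_metadata_stub()?; // map metadata
    Ok(())
}

Either spelling propagates the first failure to the caller, which is what lets `Space::acquire` route the error to `handle_mmap_error`.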

src/util/heap/layout/fragmented_mapper.rs (+37 -11)

@@ -4,6 +4,7 @@ use crate::util::Address;
 use crate::util::{conversions, side_metadata::SideMetadata};
 use atomic::{Atomic, Ordering};
 use std::fmt;
+use std::io::Result;
 use std::mem::transmute;
 use std::sync::Mutex;
 
@@ -82,7 +83,12 @@ impl Mmapper for FragmentedMapper {
         }
     }
 
-    fn ensure_mapped(&self, mut start: Address, pages: usize, metadata: &SideMetadata) {
+    fn ensure_mapped(
+        &self,
+        mut start: Address,
+        pages: usize,
+        metadata: &SideMetadata,
+    ) -> Result<()> {
         let end = start + conversions::pages_to_bytes(pages);
         // Iterate over the slabs covered
         while start < end {
@@ -109,10 +115,17 @@
                 match entry.load(Ordering::Relaxed) {
                     MapState::Unmapped => {
                         let mmap_start = Self::chunk_index_to_address(base, chunk);
-                        crate::util::memory::dzmmap_noreplace(mmap_start, MMAP_CHUNK_BYTES)
-                            .unwrap();
-                        self.map_metadata(mmap_start, metadata)
-                            .expect("failed to map metadata memory");
+                        // map data
+                        let res =
+                            crate::util::memory::dzmmap_noreplace(mmap_start, MMAP_CHUNK_BYTES);
+                        if res.is_err() {
+                            return res;
+                        }
+                        // map metadata
+                        let res = self.map_metadata(mmap_start, metadata);
+                        if res.is_err() {
+                            return res;
+                        }
                     }
                     MapState::Protected => {
                         let mmap_start = Self::chunk_index_to_address(base, chunk);
@@ -126,6 +139,7 @@
             }
             start = high;
         }
+        Ok(())
     }
 
     /**
@@ -388,7 +402,9 @@
         with_cleanup(
             || {
                 let mmapper = FragmentedMapper::new();
-                mmapper.ensure_mapped(FIXED_ADDRESS, pages, &NO_METADATA);
+                mmapper
+                    .ensure_mapped(FIXED_ADDRESS, pages, &NO_METADATA)
+                    .unwrap();
 
                 let chunks = pages_to_chunks_up(pages);
                 for i in 0..chunks {
@@ -414,7 +430,9 @@
         with_cleanup(
            || {
                 let mmapper = FragmentedMapper::new();
-                mmapper.ensure_mapped(FIXED_ADDRESS, pages, &NO_METADATA);
+                mmapper
+                    .ensure_mapped(FIXED_ADDRESS, pages, &NO_METADATA)
+                    .unwrap();
 
                 let chunks = pages_to_chunks_up(pages);
                 for i in 0..chunks {
@@ -441,7 +459,9 @@
         with_cleanup(
             || {
                 let mmapper = FragmentedMapper::new();
-                mmapper.ensure_mapped(FIXED_ADDRESS, pages, &NO_METADATA);
+                mmapper
+                    .ensure_mapped(FIXED_ADDRESS, pages, &NO_METADATA)
+                    .unwrap();
 
                 let chunks = pages_to_chunks_up(pages);
                 for i in 0..chunks {
@@ -469,7 +489,9 @@
                 // map 2 chunks
                 let mmapper = FragmentedMapper::new();
                 let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
-                mmapper.ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &NO_METADATA);
+                mmapper
+                    .ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &NO_METADATA)
+                    .unwrap();
 
                 // protect 1 chunk
                 mmapper.protect(FIXED_ADDRESS, pages_per_chunk);
@@ -498,7 +520,9 @@
                 // map 2 chunks
                 let mmapper = FragmentedMapper::new();
                 let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
-                mmapper.ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &NO_METADATA);
+                mmapper
+                    .ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &NO_METADATA)
+                    .unwrap();
 
                 // protect 1 chunk
                 mmapper.protect(FIXED_ADDRESS, pages_per_chunk);
@@ -513,7 +537,9 @@
                 );
 
                 // ensure mapped - this will unprotect the previously protected chunk
-                mmapper.ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &NO_METADATA);
+                mmapper
+                    .ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &NO_METADATA)
+                    .unwrap();
                 assert_eq!(
                     get_chunk_map_state(&mmapper, FIXED_ADDRESS),
                     Some(MapState::Mapped)
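
The per-chunk logic in `ensure_mapped` is a small state machine over `MapState`: unmapped chunks get mmapped, protected chunks get unprotected, and mapped chunks need no work. A condensed sketch of those transitions; the closure parameters are illustrative stand-ins for the real syscall wrappers, and treating a failed unprotect as a propagated error (rather than a panic) is an assumption here:

use std::io::Result;

#[derive(Clone, Copy, Debug, PartialEq)]
enum MapState {
    Unmapped,
    Protected,
    Mapped,
}

// One chunk's transition; failures propagate to the caller instead of
// panicking inside the mmapper, mirroring the direction of this patch.
fn transition(
    state: MapState,
    mmap: impl Fn() -> Result<()>,
    unprotect: impl Fn() -> Result<()>,
) -> Result<MapState> {
    match state {
        MapState::Unmapped => {
            mmap()?;
            Ok(MapState::Mapped)
        }
        MapState::Protected => {
            unprotect()?;
            Ok(MapState::Mapped)
        }
        MapState::Mapped => Ok(MapState::Mapped),
    }
}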

src/util/heap/layout/mmapper.rs (+5 -12)

@@ -1,6 +1,6 @@
-use crate::util::{side_metadata::SideMetadata, Address};
-
 use super::vm_layout_constants::BYTES_IN_CHUNK;
+use crate::util::{side_metadata::SideMetadata, Address};
+use std::io::Result;
 
 pub trait Mmapper {
     /****************************************************************************
@@ -37,19 +37,12 @@ pub trait Mmapper {
      * @param start The start of the range to be mapped.
      * @param pages The size of the range to be mapped, in pages
      */
-    fn ensure_mapped(&self, start: Address, pages: usize, metadata: &SideMetadata);
+    fn ensure_mapped(&self, start: Address, pages: usize, metadata: &SideMetadata) -> Result<()>;
 
     /// Map metadata memory for a given chunk
     #[allow(clippy::result_unit_err)]
-    fn map_metadata(&self, chunk: Address, metadata: &SideMetadata) -> Result<(), ()> {
-        if metadata
-            .try_map_metadata_space(chunk, BYTES_IN_CHUNK)
-            .is_err()
-        {
-            Err(())
-        } else {
-            Ok(())
-        }
+    fn map_metadata(&self, chunk: Address, metadata: &SideMetadata) -> Result<()> {
+        metadata.try_map_metadata_space(chunk, BYTES_IN_CHUNK)
    }
 
     /**
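
With both trait methods now returning `std::io::Result<()>`, an implementor reports mmap failure to its caller rather than panicking internally, and the caller decides the policy (unwrap in tests, `handle_mmap_error` in `Space::acquire`). A minimal sketch of the revised shape; the trait and types here are abridged, and the real `ensure_mapped` also takes an `Address`, a page count, and `&SideMetadata`:

use std::io::Result;

trait MmapperSketch {
    fn ensure_mapped(&self) -> Result<()>;
}

struct NoopMmapper;

impl MmapperSketch for NoopMmapper {
    fn ensure_mapped(&self) -> Result<()> {
        // Success is signalled explicitly now.
        Ok(())
    }
}

// Callers either propagate with `?` or translate the error.
fn grow(m: &impl MmapperSketch) -> Result<()> {
    m.ensure_mapped()?;
    Ok(())
}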

src/util/memory.rs (+24 -1)

@@ -1,6 +1,8 @@
+use crate::util::opaque_pointer::*;
 use crate::util::Address;
+use crate::vm::{Collection, VMBinding};
 use libc::{PROT_EXEC, PROT_NONE, PROT_READ, PROT_WRITE};
-use std::io::Result;
+use std::io::{Error, Result};
 
 pub fn result_is_mapped(result: Result<()>) -> bool {
     match result {
@@ -80,6 +82,27 @@ pub fn munmap(start: Address, size: usize) -> Result<()> {
     wrap_libc_call(&|| unsafe { libc::munmap(start.to_mut_ptr(), size) }, 0)
 }
 
+/// Properly handle errors from a mmap Result, including invoking the binding code for an OOM error.
+pub fn handle_mmap_error<VM: VMBinding>(error: Error, tls: OpaquePointer) -> ! {
+    use std::io::ErrorKind;
+
+    match error.kind() {
+        ErrorKind::Other => {
+            // further check the error
+            if let Some(os_errno) = error.raw_os_error() {
+                // If it is OOM, we invoke out_of_memory() through the VM interface.
+                if os_errno == libc::ENOMEM {
+                    VM::VMCollection::out_of_memory(tls);
+                    unreachable!()
+                }
+            }
+        }
+        ErrorKind::AlreadyExists => panic!("Failed to mmap, the address is already mapped. Should MMTk quarantine the address range first?"),
+        _ => {}
+    }
+    panic!("Unexpected mmap failure: {:?}", error)
+}
+
 /// Checks if the memory has already been mapped. If not, we panic.
 // Note that the checking has a side effect that it will map the memory if it was unmapped. So we panic if it was unmapped.
 // Be very careful about using this function.
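
`handle_mmap_error` singles out the OOM case by inspecting the raw OS errno carried by the `io::Error`. A small self-contained illustration of that classification, assuming the `libc` crate as the patch itself does:

use std::io::Error;

// ENOMEM is the one errno routed to VM::VMCollection::out_of_memory;
// every other failure ends in a panic.
fn classify(error: &Error) -> &'static str {
    match error.raw_os_error() {
        Some(errno) if errno == libc::ENOMEM => "out of memory",
        Some(_) => "other OS error",
        None => "not an OS-level error",
    }
}

fn main() {
    let oom = Error::from_raw_os_error(libc::ENOMEM);
    assert_eq!(classify(&oom), "out of memory");
}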

src/vm/collection.rs (+1)

@@ -55,6 +55,7 @@ pub trait Collection<VM: VMBinding> {
     );
 
     /// Inform the VM for an out-of-memory error. The VM can implement its own error routine for OOM.
+    /// Note the VM needs to fail in this call. We do not expect the VM to resume in any way.
     ///
     /// Arguments:
     /// * `tls`: The thread pointer for the mutator which failed the allocation and triggered the OOM.
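
For bindings, the strengthened doc comment means `out_of_memory` must diverge even though its signature returns `()`; the caller in `handle_mmap_error` follows it with `unreachable!()`. A hypothetical sketch of what a binding might do (illustrative only, not MMTk API; judging by the tests below, the default implementation simply panics with "Out of memory!"):

// Illustrative only. The real trait method also receives the failing
// thread's tls pointer.
fn out_of_memory_sketch() {
    // A binding may run its own OOM routine first (logging, heap dump, ...),
    // but it must terminate rather than resume the failed allocation.
    eprintln!("MMTk: mmap failed with ENOMEM; the heap cannot grow");
    std::process::abort();
}
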
New file (+24)

@@ -0,0 +1,24 @@
+use mmtk::util::Address;
+use mmtk::util::OpaquePointer;
+use mmtk::util::memory;
+use crate::DummyVM;
+
+#[test]
+pub fn test_handle_mmap_conflict() {
+    let start = unsafe { Address::from_usize(0x100_0000) };
+    let one_megabyte = 1000000;
+    let mmap1_res = memory::dzmmap_noreplace(start, one_megabyte);
+    assert!(mmap1_res.is_ok());
+
+    let panic_res = std::panic::catch_unwind(|| {
+        let mmap2_res = memory::dzmmap_noreplace(start, one_megabyte);
+        assert!(mmap2_res.is_err());
+        memory::handle_mmap_error::<DummyVM>(mmap2_res.err().unwrap(), OpaquePointer::UNINITIALIZED);
+    });
+
+    // The error should match the error message in memory::handle_mmap_error()
+    assert!(panic_res.is_err());
+    let err = panic_res.err().unwrap();
+    assert!(err.is::<&str>());
+    assert_eq!(err.downcast_ref::<&str>().unwrap(), &"Failed to mmap, the address is already mapped. Should MMTk quarantine the address range first?");
+}

New file (+23)

@@ -0,0 +1,23 @@
+use mmtk::util::Address;
+use mmtk::util::OpaquePointer;
+use mmtk::util::memory;
+use crate::DummyVM;
+
+#[test]
+pub fn test_handle_mmap_oom() {
+    let panic_res = std::panic::catch_unwind(move || {
+        let start = unsafe { Address::from_usize(0x100_0000) };
+        let one_terabyte = 1000000000000;
+        // mmap 1 terabyte memory - we expect this will fail due to out of memory.
+        // If that's not the case, increase the size we mmap.
+        let mmap_res = memory::dzmmap_noreplace(start, one_terabyte);
+
+        memory::handle_mmap_error::<DummyVM>(mmap_res.err().unwrap(), OpaquePointer::UNINITIALIZED);
+    });
+    assert!(panic_res.is_err());
+
+    // The error should match the default implementation of Collection::out_of_memory()
+    let err = panic_res.err().unwrap();
+    assert!(err.is::<&str>());
+    assert_eq!(err.downcast_ref::<&str>().unwrap(), &"Out of memory!");
+}
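
Both tests rely on the same panic-payload detail: `panic!` invoked with a string literal carries a `&'static str` payload, which `catch_unwind` returns as a boxed `Any` that the assertions downcast. A standalone illustration of the pattern:

fn main() {
    let res = std::panic::catch_unwind(|| panic!("Out of memory!"));
    // catch_unwind yields Err(payload) when the closure panicked.
    let err = res.err().unwrap();
    // A panic!("literal") payload is a &'static str, so this downcast succeeds.
    assert!(err.is::<&str>());
    assert_eq!(err.downcast_ref::<&str>().unwrap(), &"Out of memory!");
}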
