@@ -11,6 +11,7 @@ use std::sync::atomic::Ordering;
 use std::sync::Mutex;

 use crate::util::memory::{dzmmap_noreplace, mprotect, munprotect};
+use std::io::Result;
 use std::mem::transmute;

 const UNMAPPED: u8 = 0;
@@ -48,7 +49,7 @@ impl Mmapper for ByteMapMmapper {
         }
     }

-    fn ensure_mapped(&self, start: Address, pages: usize, metadata: &SideMetadata) {
+    fn ensure_mapped(&self, start: Address, pages: usize, metadata: &SideMetadata) -> Result<()> {
         let start_chunk = Self::address_to_mmap_chunks_down(start);
         let end_chunk = Self::address_to_mmap_chunks_up(start + pages_to_bytes(pages));
         trace!(
@@ -68,27 +69,15 @@ impl Mmapper for ByteMapMmapper {
             let guard = self.lock.lock().unwrap();
             // might have become MAPPED here
             if self.mapped[chunk].load(Ordering::Relaxed) == UNMAPPED {
-                match dzmmap_noreplace(mmap_start, MMAP_CHUNK_BYTES) {
-                    Ok(_) => {
-                        self.map_metadata(mmap_start, metadata)
-                            .expect("failed to map metadata memory");
-                        if VERBOSE {
-                            trace!(
-                                "mmap succeeded at chunk {} {} with len = {}",
-                                chunk,
-                                mmap_start,
-                                MMAP_CHUNK_BYTES
-                            );
-                        }
-                    }
-                    Err(e) => {
-                        drop(guard);
-                        panic!(
-                            "ensureMapped failed on address {}\n\
-                             Can't get more space with mmap(): {}",
-                            mmap_start, e
-                        );
-                    }
+                // map data
+                let res = dzmmap_noreplace(mmap_start, MMAP_CHUNK_BYTES);
+                if res.is_err() {
+                    return res;
+                }
+                // map metadata
+                let res = self.map_metadata(mmap_start, metadata);
+                if res.is_err() {
+                    return res;
                 }
             }

@@ -114,6 +103,8 @@ impl Mmapper for ByteMapMmapper {
             self.mapped[chunk].store(MAPPED, Ordering::Relaxed);
             drop(guard);
         }
+
+        Ok(())
     }

     /**
@@ -268,7 +259,9 @@ mod tests {
             || {
                 let mmapper = ByteMapMmapper::new();
                 let pages = 1;
-                mmapper.ensure_mapped(FIXED_ADDRESS, pages, &NO_METADATA);
+                mmapper
+                    .ensure_mapped(FIXED_ADDRESS, pages, &NO_METADATA)
+                    .unwrap();

                 let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
                 let end_chunk = ByteMapMmapper::address_to_mmap_chunks_up(
@@ -292,7 +285,9 @@ mod tests {
             || {
                 let mmapper = ByteMapMmapper::new();
                 let pages = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
-                mmapper.ensure_mapped(FIXED_ADDRESS, pages, &NO_METADATA);
+                mmapper
+                    .ensure_mapped(FIXED_ADDRESS, pages, &NO_METADATA)
+                    .unwrap();

                 let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
                 let end_chunk = ByteMapMmapper::address_to_mmap_chunks_up(
@@ -317,7 +312,9 @@ mod tests {
                 let mmapper = ByteMapMmapper::new();
                 let pages =
                     (MMAP_CHUNK_BYTES + MMAP_CHUNK_BYTES / 2) >> LOG_BYTES_IN_PAGE as usize;
-                mmapper.ensure_mapped(FIXED_ADDRESS, pages, &NO_METADATA);
+                mmapper
+                    .ensure_mapped(FIXED_ADDRESS, pages, &NO_METADATA)
+                    .unwrap();

                 let start_chunk = ByteMapMmapper::address_to_mmap_chunks_down(FIXED_ADDRESS);
                 let end_chunk = ByteMapMmapper::address_to_mmap_chunks_up(
@@ -343,7 +340,9 @@ mod tests {
                 // map 2 chunks
                 let mmapper = ByteMapMmapper::new();
                 let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
-                mmapper.ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &NO_METADATA);
+                mmapper
+                    .ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &NO_METADATA)
+                    .unwrap();

                 // protect 1 chunk
                 mmapper.protect(FIXED_ADDRESS, pages_per_chunk);
@@ -367,7 +366,9 @@
                 // map 2 chunks
                 let mmapper = ByteMapMmapper::new();
                 let pages_per_chunk = MMAP_CHUNK_BYTES >> LOG_BYTES_IN_PAGE as usize;
-                mmapper.ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &NO_METADATA);
+                mmapper
+                    .ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &NO_METADATA)
+                    .unwrap();

                 // protect 1 chunk
                 mmapper.protect(FIXED_ADDRESS, pages_per_chunk);
@@ -377,7 +378,9 @@
                 assert_eq!(mmapper.mapped[chunk + 1].load(Ordering::Relaxed), MAPPED);

                 // ensure mapped - this will unprotect the previously protected chunk
-                mmapper.ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &NO_METADATA);
+                mmapper
+                    .ensure_mapped(FIXED_ADDRESS, pages_per_chunk * 2, &NO_METADATA)
+                    .unwrap();
                 assert_eq!(mmapper.mapped[chunk].load(Ordering::Relaxed), MAPPED);
                 assert_eq!(mmapper.mapped[chunk + 1].load(Ordering::Relaxed), MAPPED);
             },
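With this change, `ensure_mapped` surfaces mmap failures to the caller as `std::io::Result<()>` instead of panicking inside the mmapper; the tests above simply `.unwrap()`. A minimal caller-side sketch of the propagating style this enables, assuming a hypothetical `grow_space` helper (its name and parameters are invented for illustration; only `ensure_mapped`'s new signature comes from the diff):

use std::io::Result;

// Hypothetical helper: forwards the mmap failure to its own caller
// with `?` rather than unwrapping as the tests do.
fn grow_space(
    mmapper: &ByteMapMmapper,
    start: Address,
    pages: usize,
    metadata: &SideMetadata,
) -> Result<()> {
    // Any dzmmap_noreplace or map_metadata error inside ensure_mapped
    // is returned here instead of aborting the process.
    mmapper.ensure_mapped(start, pages, metadata)?;
    Ok(())
}

Since `dzmmap_noreplace` and `map_metadata` appear to yield the same `Result` type, the two explicit `if res.is_err() { return res; }` blocks in `ensure_mapped` are equivalent to applying the `?` operator to each call.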