1
1
use super :: super :: SpaceId ;
2
2
use super :: PageResource ;
3
3
use crate :: util:: freelist:: page_freelist:: PageFreeList ;
4
+ use crate :: util:: heap:: HEAP ;
4
5
use crate :: util:: memory:: RawMemory ;
5
6
use crate :: util:: * ;
6
7
use spin:: mutex:: Mutex ;
@@ -20,49 +21,39 @@ pub struct FreelistPageResource {
20
21
freelist : Mutex < PageFreeList < { NUM_SIZE_CLASS } > , Yield > ,
21
22
reserved_bytes : AtomicUsize ,
22
23
meta : RwLock < Vec < AtomicU32 > , Yield > ,
24
+ base : Address ,
23
25
}
24
26
25
27
impl FreelistPageResource {
26
28
/// Creates a page resource managing the heap range assigned to `id`.
///
/// The whole range is initially released into the freelist as one maximal
/// cell, so the first allocation carves pages out of it.
pub fn new(id: SpaceId) -> Self {
    // Space ids must fit in the low nibble (values 0..=14 here; 0b1111 itself
    // is excluded — presumably reserved. TODO(review): confirm against SpaceId).
    debug_assert!(id.0 < 0b0000_1111);
    // Ask the global heap for this space's address range; its start is the
    // base address all 4K-page meta indices are computed from.
    let range = HEAP.get_space_range(id);
    let base = range.start;
    let mut freelist = PageFreeList::new(base);
    // Seed the freelist with the largest size class: 2^(NUM_SIZE_CLASS-1) units.
    freelist.release_cell(base, 1 << (NUM_SIZE_CLASS - 1));
    Self {
        id,
        freelist: Mutex::new(freelist),
        reserved_bytes: AtomicUsize::new(0),
        // SAFETY/NOTE(review): reinterprets Vec<u32> as Vec<AtomicU32>. This
        // relies on AtomicU32 having the same size/alignment as u32
        // (guaranteed by std), but transmuting Vec element types is fragile —
        // consider building the Vec<AtomicU32> directly instead.
        meta: RwLock::new(unsafe { std::mem::transmute(vec![0u32; 1 << 20]) }),
        base,
    }
}
38
42
39
/// Accounts for `pages` pages of size class `S` becoming reserved.
///
/// NOTE(review): this no longer performs any actual mmap — the start address
/// is intentionally unused. Presumably the whole space range is mapped ahead
/// of time by `HEAP` (see `get_space_range` in `new`), leaving only the
/// bookkeeping here — confirm against the heap initialization code.
fn map_pages<S: PageSize>(&self, _start: Page<S>, pages: usize) {
    // Track reserved bytes for heap-size accounting.
    self.reserved_bytes
        .fetch_add(pages << S::LOG_BYTES, Ordering::SeqCst);
}
56
47
57
48
/// Returns `pages` pages of size class `S` to the OS and updates accounting.
///
/// Uses `madv_free` rather than unmapping: the virtual mapping is kept
/// (presumably so the range stays usable by the pre-mapped heap) while the
/// kernel is allowed to reclaim the physical pages lazily.
fn unmap_pages<S: PageSize>(&self, start: Page<S>, pages: usize) {
    RawMemory::madv_free(start.start(), pages << S::LOG_BYTES);
    // Mirror of the fetch_add in map_pages.
    self.reserved_bytes
        .fetch_sub(pages << S::LOG_BYTES, Ordering::SeqCst);
}
62
53
63
54
fn set_meta < S : PageSize > ( & self , start : Page < S > , pages : usize ) {
64
55
debug_assert ! ( pages <= u32 :: MAX as usize ) ;
65
- let index = ( start. start ( ) - self . id . address_space ( ) . start ) >> Page :: < Size4K > :: LOG_BYTES ;
56
+ let index = ( start. start ( ) - self . base ) >> Page :: < Size4K > :: LOG_BYTES ;
66
57
let meta = self . meta . upgradeable_read ( ) ;
67
58
if index >= meta. len ( ) {
68
59
let mut meta = meta. upgrade ( ) ;
@@ -75,7 +66,7 @@ impl FreelistPageResource {
75
66
}
76
67
77
68
/// Reads the per-region metadata recorded by `set_meta` for the region
/// starting at `start` (stored there as the unit count of the allocation).
///
/// The index is the 4K-page offset of `start` from this space's base, so it
/// matches `set_meta`'s indexing regardless of `S`. Indexing panics if the
/// slot was never grown by `set_meta` — callers presumably only query
/// previously recorded regions.
fn get_meta<S: PageSize>(&self, start: Page<S>) -> usize {
    let index = (start.start() - self.base) >> Page::<Size4K>::LOG_BYTES;
    // Relaxed is sufficient here as no ordering with other memory is implied;
    // the RwLock read guard only protects against concurrent Vec growth.
    self.meta.read()[index].load(Ordering::Relaxed) as _
}
}
@@ -90,9 +81,7 @@ impl PageResource for FreelistPageResource {
90
81
let units = pages << ( S :: LOG_BYTES - Size4K :: LOG_BYTES ) ;
91
82
let start = self . freelist . lock ( ) . allocate_cell ( units) ?. start ;
92
83
let start = Page :: < S > :: new ( start) ;
93
- if !self . map_pages ( start, pages) {
94
- return self . acquire_pages ( pages) ; // Retry
95
- }
84
+ self . map_pages ( start, pages) ;
96
85
let end = Step :: forward ( start, pages) ;
97
86
self . set_meta ( start, units) ;
98
87
Some ( start..end)
0 commit comments