@@ -259,7 +259,7 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
 
   // set the dirty bits (todo: no need for an atomic op here?)
   if (arena->memid.initially_zero && arena->blocks_dirty != NULL) {
-    memid->initially_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL);
+    memid->initially_zero = _mi_bitmap_claim_across(arena->blocks_dirty, arena->field_count, needed_bcount, bitmap_index, NULL, NULL);
   }
 
   // set commit state
@@ -271,10 +271,14 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
     // commit requested, but the range may not be committed as a whole: ensure it is committed now
     memid->initially_committed = true;
     bool any_uncommitted;
-    _mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted);
+    size_t already_committed = 0;
+    _mi_bitmap_claim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &any_uncommitted, &already_committed);
     if (any_uncommitted) {
+      mi_assert_internal(already_committed < needed_bcount);
+      const size_t commit_size = mi_arena_block_size(needed_bcount);
+      const size_t stat_commit_size = commit_size - mi_arena_block_size(already_committed);
       bool commit_zero = false;
-      if (!_mi_os_commit(p, mi_arena_block_size(needed_bcount), &commit_zero)) {
+      if (!_mi_os_commit_ex(p, commit_size, &commit_zero, stat_commit_size)) {
         memid->initially_committed = false;
       }
       else {
@@ -284,7 +288,14 @@ static mi_decl_noinline void* mi_arena_try_alloc_at(mi_arena_t* arena, size_t ar
   }
   else {
     // no need to commit, but check if already fully committed
-    memid->initially_committed = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
+    size_t already_committed = 0;
+    memid->initially_committed = _mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index, &already_committed);
+    if (!memid->initially_committed && already_committed > 0) {
+      // partially committed: as it will be committed at some time, adjust the stats and pretend the range is fully uncommitted.
+      mi_assert_internal(already_committed < needed_bcount);
+      _mi_stat_decrease(&_mi_stats_main.committed, mi_arena_block_size(already_committed));
+      _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, needed_bcount, bitmap_index);
+    }
   }
 
   return p;
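
The split between `commit_size` and `stat_commit_size` above lets the OS commit cover the whole requested range while only the newly committed part is charged to the commit statistic. A minimal sketch of that arithmetic, assuming a hypothetical 64 KiB arena block size and made-up block counts (not taken from the diff):

// sketch only: illustrates the commit_size / stat_commit_size split with assumed numbers
#include <assert.h>
#include <stddef.h>

#define ARENA_BLOCK_SIZE ((size_t)(64 * 1024))   // assumption; stands in for mi_arena_block_size(1)

int main(void) {
  const size_t needed_bcount     = 8;   // blocks being allocated (made-up)
  const size_t already_committed = 3;   // blocks the bitmap reported as committed (made-up)

  const size_t commit_size      = needed_bcount * ARENA_BLOCK_SIZE;                    // range handed to the OS commit
  const size_t stat_commit_size = commit_size - already_committed * ARENA_BLOCK_SIZE;  // only newly committed bytes are counted

  assert(stat_commit_size == 5 * ARENA_BLOCK_SIZE);  // 8 - 3 blocks are charged to the stat
  return 0;
}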
@@ -468,17 +479,19 @@ static void mi_arena_purge(mi_arena_t* arena, size_t bitmap_idx, size_t blocks)
   const size_t size = mi_arena_block_size(blocks);
   void* const p = mi_arena_block_start(arena, bitmap_idx);
   bool needs_recommit;
-  if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx)) {
+  size_t already_committed = 0;
+  if (_mi_bitmap_is_claimed_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx, &already_committed)) {
     // all blocks are committed, we can purge freely
+    mi_assert_internal(already_committed == blocks);
     needs_recommit = _mi_os_purge(p, size);
   }
   else {
     // some blocks are not committed -- this can happen when a partially committed block is freed
     // in `_mi_arena_free` and it is conservatively marked as uncommitted but still scheduled for a purge
-    // we need to ensure we do not try to reset (as that may be invalid for uncommitted memory),
-    // and also undo the decommit stats (as it was already adjusted)
+    // we need to ensure we do not try to reset (as that may be invalid for uncommitted memory).
+    mi_assert_internal(already_committed < blocks);
     mi_assert_internal(mi_option_is_enabled(mi_option_purge_decommits));
-    needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, 0);
+    needs_recommit = _mi_os_purge_ex(p, size, false /* allow reset? */, mi_arena_block_size(already_committed));
   }
 
   // clear the purged blocks
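
The purge path uses the same idea: the last argument to `_mi_os_purge_ex` now carries how much of the purged range was actually committed, so only that part is subtracted from the committed statistic. A toy model of that bookkeeping, using stand-in names (`toy_purge_ex`, `committed_stat`) rather than mimalloc's real implementation:

// sketch only: models how a stat_size argument keeps the committed statistic balanced
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static long long committed_stat;           // stand-in for _mi_stats_main.committed

// hypothetical stand-in for _mi_os_purge_ex: decommit `size` bytes, but only
// subtract `stat_size` (the bytes that were really committed) from the statistic
static bool toy_purge_ex(size_t size, size_t stat_size) {
  (void)size;                              // the full range is decommitted regardless
  committed_stat -= (long long)stat_size;
  return true;                             // a decommit always needs a later recommit
}

int main(void) {
  const size_t block = 64 * 1024;          // assumed arena block size
  committed_stat = 3 * block;              // 3 of the 8 purged blocks were committed
  toy_purge_ex(8 * block, 3 * block);      // purge the whole range, charge only 3 blocks
  printf("committed stat after purge: %lld\n", committed_stat);  // prints 0: no over-subtraction
  return 0;
}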
@@ -512,7 +525,7 @@ static void mi_arena_schedule_purge(mi_arena_t* arena, size_t bitmap_idx, size_t
     else {
       // already an expiration was set
     }
-    _mi_bitmap_claim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx, NULL);
+    _mi_bitmap_claim_across(arena->blocks_purge, arena->field_count, blocks, bitmap_idx, NULL, NULL);
   }
 }
 
@@ -652,7 +665,7 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
   if (p == NULL) return;
   if (size == 0) return;
   const bool all_committed = (committed_size == size);
-  const bool decommitted_size = (committed_size <= size ? size - committed_size : 0);
+  const size_t decommitted_size = (committed_size <= size ? size - committed_size : 0);
 
   // need to set all memory to undefined as some parts may still be marked as no_access (like padding etc.)
   mi_track_mem_undefined(p,size);
@@ -695,14 +708,14 @@ void _mi_arena_free(void* p, size_t size, size_t committed_size, mi_memid_t memi
     mi_assert_internal(arena->blocks_purge != NULL);
 
     if (!all_committed) {
-      // mark the entire range as no longer committed (so we recommit the full range when re-using)
+      // mark the entire range as no longer committed (so we will recommit the full range when re-using)
       _mi_bitmap_unclaim_across(arena->blocks_committed, arena->field_count, blocks, bitmap_idx);
       mi_track_mem_noaccess(p,size);
-      if (committed_size > 0) {
+      // if (committed_size > 0) {
         // if partially committed, adjust the committed stats (is it will be recommitted when re-using)
         // in the delayed purge, we do no longer decrease the commit if the range is not marked entirely as committed.
         _mi_stat_decrease(&_mi_stats_main.committed, committed_size);
-      }
+      // }
       // note: if not all committed, it may be that the purge will reset/decommit the entire range
       // that contains already decommitted parts. Since purge consistently uses reset or decommit that
       // works (as we should never reset decommitted parts).