@@ -623,6 +623,24 @@ static umf_result_t coarse_add_used_block(coarse_t *coarse, void *addr,
     return UMF_RESULT_SUCCESS;
 }
 
+static umf_result_t coarse_add_free_block(coarse_t *coarse, void *addr,
+                                          size_t size, block_t **free_block) {
+    *free_block = NULL;
+
+    block_t *new_block =
+        coarse_ravl_add_new(coarse->all_blocks, addr, size, NULL);
+    if (new_block == NULL) {
+        return UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
+    }
+
+    new_block->used = false;
+    coarse->alloc_size += size;
+
+    *free_block = new_block;
+
+    return UMF_RESULT_SUCCESS;
+}
+
 static void coarse_ravl_cb_rm_all_blocks_node(void *data, void *arg) {
     assert(data);
     assert(arg);
@@ -1053,88 +1071,101 @@ umf_result_t coarse_alloc(coarse_t *coarse, size_t size, size_t alignment,
 
     assert(debug_check(coarse));
 
+    *resultPtr = NULL;
+
     // Find a block with greater or equal size using the given memory allocation strategy
     block_t *curr = find_free_block(coarse->free_blocks, size, alignment,
                                     coarse->allocation_strategy);
-
-    // If the block that we want to reuse has a greater size, split it.
-    // Try to merge the split part with the successor if it is not used.
-    enum { ACTION_NONE = 0, ACTION_USE, ACTION_SPLIT } action = ACTION_NONE;
-
-    if (curr && curr->size > size) {
-        action = ACTION_SPLIT;
-    } else if (curr && curr->size == size) {
-        action = ACTION_USE;
-    }
-
-    if (action) { // ACTION_SPLIT or ACTION_USE
-        assert(curr->used == false);
-
-        // In case of non-zero alignment create an aligned block what would be further used.
-        if (alignment > 0) {
-            umf_result = create_aligned_block(coarse, size, alignment, &curr);
-            if (umf_result != UMF_RESULT_SUCCESS) {
-                (void)free_blocks_re_add(coarse, curr);
-                goto err_unlock;
-            }
+    if (curr == NULL) {
+        // no suitable block found - try to get more memory from the upstream provider
+        umf_result = UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
+
+        if (!coarse->cb.alloc) {
+            LOG_ERR("out of memory (the memory provider does not support "
+                    "allocating more memory)");
+            goto err_unlock;
         }
 
-        if (action == ACTION_SPLIT) {
-            // Split the current block and put the new block after the one that we use.
-            umf_result = split_current_block(coarse, curr, size);
-            if (umf_result != UMF_RESULT_SUCCESS) {
-                (void)free_blocks_re_add(coarse, curr);
-                goto err_unlock;
-            }
+        size_t size_aligned = ALIGN_UP_SAFE(size, alignment);
+        if (size_aligned == 0) {
+            // cannot align up (arithmetic overflow)
+            umf_result = UMF_RESULT_ERROR_INVALID_ARGUMENT;
+            LOG_ERR("size too huge (arithmetic overflow)");
+            goto err_unlock;
+        }
 
-            curr->size = size;
+        umf_result = coarse->cb.alloc(coarse->provider, size_aligned, alignment,
+                                      resultPtr);
+        if (umf_result != UMF_RESULT_SUCCESS) {
+            LOG_ERR("coarse_alloc_cb() failed: out of memory");
+            goto err_unlock;
+        }
 
-            LOG_DEBUG("coarse_ALLOC (split_block) %zu used %zu alloc %zu", size,
-                      coarse->used_size, coarse->alloc_size);
+        ASSERT_IS_ALIGNED(((uintptr_t)(*resultPtr)), alignment);
 
-        } else { // action == ACTION_USE
-            LOG_DEBUG("coarse_ALLOC (same_block) %zu used %zu alloc %zu", size,
-                      coarse->used_size, coarse->alloc_size);
+        block_t *new_free_block = NULL;
+        umf_result = coarse_add_free_block(coarse, *resultPtr, size_aligned,
+                                           &new_free_block);
+        if (umf_result != UMF_RESULT_SUCCESS) {
+            if (coarse->cb.free) {
+                coarse->cb.free(coarse->provider, *resultPtr, size_aligned);
+            }
+            goto err_unlock;
         }
 
-        curr->used = true;
-        *resultPtr = curr->data;
-        coarse->used_size += size;
-
-        assert(debug_check(coarse));
-        utils_mutex_unlock(&coarse->lock);
+        LOG_DEBUG("coarse_ALLOC (memory_provider) %zu used %zu alloc %zu",
+                  size_aligned, coarse->used_size, coarse->alloc_size);
 
-        return UMF_RESULT_SUCCESS;
+        curr = new_free_block;
     }
 
-    // no suitable block found - try to get more memory from the upstream provider
-    umf_result = UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
-
-    *resultPtr = NULL;
+    // If the block that we want to reuse has a greater size, split it.
+    // Try to merge the split part with the successor if it is not used.
+    enum { ACTION_NONE = 0, ACTION_USE, ACTION_SPLIT } action = ACTION_NONE;
 
-    if (!coarse->cb.alloc) {
+    if (curr && curr->size > size) {
+        action = ACTION_SPLIT;
+    } else if (curr && curr->size == size) {
+        action = ACTION_USE;
+    } else {
+        umf_result = UMF_RESULT_ERROR_OUT_OF_HOST_MEMORY;
         LOG_ERR("out of memory");
         goto err_unlock;
     }
 
-    umf_result = coarse->cb.alloc(coarse->provider, size, alignment, resultPtr);
-    if (umf_result != UMF_RESULT_SUCCESS) {
-        LOG_ERR("coarse_alloc_cb() failed: out of memory");
-        goto err_unlock;
-    }
+    // ACTION_SPLIT or ACTION_USE
+    assert(curr->used == false);
 
-    ASSERT_IS_ALIGNED(((uintptr_t)(*resultPtr)), alignment);
+    // In case of non-zero alignment create an aligned block what would be further used.
+    if (alignment > 0) {
+        umf_result = create_aligned_block(coarse, size, alignment, &curr);
+        if (umf_result != UMF_RESULT_SUCCESS) {
+            (void)free_blocks_re_add(coarse, curr);
+            goto err_unlock;
+        }
+    }
 
-    umf_result = coarse_add_used_block(coarse, *resultPtr, size);
-    if (umf_result != UMF_RESULT_SUCCESS) {
-        if (coarse->cb.free) {
-            coarse->cb.free(coarse->provider, *resultPtr, size);
+    if (action == ACTION_SPLIT) {
+        // Split the current block and put the new block after the one that we use.
+        umf_result = split_current_block(coarse, curr, size);
+        if (umf_result != UMF_RESULT_SUCCESS) {
+            (void)free_blocks_re_add(coarse, curr);
+            goto err_unlock;
         }
-        goto err_unlock;
+
+        curr->size = size;
+
+        LOG_DEBUG("coarse_ALLOC (split_block) %zu used %zu alloc %zu", size,
+                  coarse->used_size, coarse->alloc_size);
+
+    } else { // action == ACTION_USE
+        LOG_DEBUG("coarse_ALLOC (same_block) %zu used %zu alloc %zu", size,
+                  coarse->used_size, coarse->alloc_size);
     }
 
-    LOG_DEBUG("coarse_ALLOC (memory_provider) %zu used %zu alloc %zu", size,
-              coarse->used_size, coarse->alloc_size);
+    curr->used = true;
+    *resultPtr = curr->data;
+    coarse->used_size += size;
 
     umf_result = UMF_RESULT_SUCCESS;
 
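For context on the overflow check in the new provider path: a return value of 0 from ALIGN_UP_SAFE is treated as "rounding size up to the requested alignment would overflow". The snippet below is a minimal, self-contained sketch of that idea only; the helper name align_up_safe and its acceptance of non-power-of-two alignments are assumptions for illustration, not the UMF macro itself.

#include <stdint.h>
#include <stdio.h>

/* Illustrative overflow-checked align-up (hypothetical helper, not UMF's
 * ALIGN_UP_SAFE). Returns 0 when rounding `size` up to a multiple of
 * `alignment` would overflow size_t; returns `size` unchanged when
 * `alignment` is 0, matching how the caller above treats a zero result
 * as an error for a non-zero request. */
static size_t align_up_safe(size_t size, size_t alignment) {
    if (alignment == 0) {
        return size;
    }
    size_t rem = size % alignment;
    if (rem == 0) {
        return size;
    }
    size_t pad = alignment - rem;
    if (size > SIZE_MAX - pad) {
        return 0; /* rounding up would overflow size_t */
    }
    return size + pad;
}

int main(void) {
    printf("%zu\n", align_up_safe(1000, 64));     /* prints 1024 */
    printf("%zu\n", align_up_safe(SIZE_MAX, 64)); /* prints 0 (overflow) */
    return 0;
}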