Veristat memory accounting #9050
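
This change switches the BTF and verifier allocations in kernel/bpf/btf.c and kernel/bpf/verifier.c from GFP_KERNEL to GFP_KERNEL_ACCOUNT, so the memory consumed while verifying a program is charged to the memory cgroup of the loading task and becomes visible to userspace accounting. A minimal sketch of the pattern being applied (illustrative only; the helper name is hypothetical and not part of the patch):

```c
#include <linux/slab.h>

/*
 * GFP_KERNEL_ACCOUNT is GFP_KERNEL | __GFP_ACCOUNT: the allocation is
 * charged to the memory cgroup of the task performing it.
 * __GFP_NOWARN suppresses the allocation-failure warning, as in the
 * existing call sites.
 */
static void *verifier_alloc_example(size_t size)
{
	/* before this change: kzalloc(size, GFP_KERNEL | __GFP_NOWARN) */
	return kzalloc(size, GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
}
```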

Closed
15 changes: 8 additions & 7 deletions kernel/bpf/btf.c
@@ -3443,7 +3443,8 @@ btf_find_graph_root(const struct btf *btf, const struct btf_type *pt,
node_field_name = strstr(value_type, ":");
if (!node_field_name)
return -EINVAL;
value_type = kstrndup(value_type, node_field_name - value_type, GFP_KERNEL | __GFP_NOWARN);
value_type = kstrndup(value_type, node_field_name - value_type,
GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (!value_type)
return -ENOMEM;
id = btf_find_by_name_kind(btf, value_type, BTF_KIND_STRUCT);
@@ -3958,7 +3959,7 @@ struct btf_record *btf_parse_fields(const struct btf *btf, const struct btf_type
/* This needs to be kzalloc to zero out padding and unused fields, see
* comment in btf_record_equal.
*/
rec = kzalloc(struct_size(rec, fields, cnt), GFP_KERNEL | __GFP_NOWARN);
rec = kzalloc(struct_size(rec, fields, cnt), GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (!rec)
return ERR_PTR(-ENOMEM);

@@ -9019,15 +9020,15 @@ static struct bpf_cand_cache *populate_cand_cache(struct bpf_cand_cache *cands,
bpf_free_cands_from_cache(*cc);
*cc = NULL;
}
new_cands = kmemdup(cands, sizeof_cands(cands->cnt), GFP_KERNEL);
new_cands = kmemdup(cands, sizeof_cands(cands->cnt), GFP_KERNEL_ACCOUNT);
if (!new_cands) {
bpf_free_cands(cands);
return ERR_PTR(-ENOMEM);
}
/* strdup the name, since it will stay in cache.
* the cands->name points to strings in prog's BTF and the prog can be unloaded.
*/
new_cands->name = kmemdup_nul(cands->name, cands->name_len, GFP_KERNEL);
new_cands->name = kmemdup_nul(cands->name, cands->name_len, GFP_KERNEL_ACCOUNT);
bpf_free_cands(cands);
if (!new_cands->name) {
kfree(new_cands);
@@ -9111,7 +9112,7 @@ bpf_core_add_cands(struct bpf_cand_cache *cands, const struct btf *targ_btf,
continue;

/* most of the time there is only one candidate for a given kind+name pair */
new_cands = kmalloc(sizeof_cands(cands->cnt + 1), GFP_KERNEL);
new_cands = kmalloc(sizeof_cands(cands->cnt + 1), GFP_KERNEL_ACCOUNT);
if (!new_cands) {
bpf_free_cands(cands);
return ERR_PTR(-ENOMEM);
@@ -9228,7 +9229,7 @@ int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
/* ~4k of temp memory necessary to convert LLVM spec like "0:1:0:5"
* into arrays of btf_ids of struct fields and array indices.
*/
specs = kcalloc(3, sizeof(*specs), GFP_KERNEL);
specs = kcalloc(3, sizeof(*specs), GFP_KERNEL_ACCOUNT);
if (!specs)
return -ENOMEM;

@@ -9253,7 +9254,7 @@ int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo,
goto out;
}
if (cc->cnt) {
cands.cands = kcalloc(cc->cnt, sizeof(*cands.cands), GFP_KERNEL);
cands.cands = kcalloc(cc->cnt, sizeof(*cands.cands), GFP_KERNEL_ACCOUNT);
if (!cands.cands) {
err = -ENOMEM;
goto out;
49 changes: 25 additions & 24 deletions kernel/bpf/verifier.c
@@ -1404,7 +1404,7 @@ static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
goto out;

alloc_size = kmalloc_size_roundup(size_mul(new_n, size));
new_arr = krealloc(arr, alloc_size, GFP_KERNEL);
new_arr = krealloc(arr, alloc_size, GFP_KERNEL_ACCOUNT);
if (!new_arr) {
kfree(arr);
return NULL;
@@ -1421,7 +1421,7 @@ static void *realloc_array(void *arr, size_t old_n, size_t new_n, size_t size)
static int copy_reference_state(struct bpf_verifier_state *dst, const struct bpf_verifier_state *src)
{
dst->refs = copy_array(dst->refs, src->refs, src->acquired_refs,
sizeof(struct bpf_reference_state), GFP_KERNEL);
sizeof(struct bpf_reference_state), GFP_KERNEL_ACCOUNT);
if (!dst->refs)
return -ENOMEM;

@@ -1440,7 +1440,7 @@ static int copy_stack_state(struct bpf_func_state *dst, const struct bpf_func_st
size_t n = src->allocated_stack / BPF_REG_SIZE;

dst->stack = copy_array(dst->stack, src->stack, n, sizeof(struct bpf_stack_state),
GFP_KERNEL);
GFP_KERNEL_ACCOUNT);
if (!dst->stack)
return -ENOMEM;

@@ -1761,7 +1761,7 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
for (i = 0; i <= src->curframe; i++) {
dst = dst_state->frame[i];
if (!dst) {
dst = kzalloc(sizeof(*dst), GFP_KERNEL);
dst = kzalloc(sizeof(*dst), GFP_KERNEL_ACCOUNT);
if (!dst)
return -ENOMEM;
dst_state->frame[i] = dst;
@@ -2033,7 +2033,7 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
struct bpf_verifier_stack_elem *elem;
int err;

elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL_ACCOUNT);
if (!elem)
goto err;

@@ -2800,7 +2800,7 @@ static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
struct bpf_verifier_stack_elem *elem;
struct bpf_func_state *frame;

elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL);
elem = kzalloc(sizeof(struct bpf_verifier_stack_elem), GFP_KERNEL_ACCOUNT);
if (!elem)
goto err;

@@ -2828,7 +2828,7 @@ static struct bpf_verifier_state *push_async_cb(struct bpf_verifier_env *env,
elem->st.in_sleepable = is_sleepable;
elem->st.insn_hist_start = env->cur_state->insn_hist_end;
elem->st.insn_hist_end = elem->st.insn_hist_start;
frame = kzalloc(sizeof(*frame), GFP_KERNEL);
frame = kzalloc(sizeof(*frame), GFP_KERNEL_ACCOUNT);
if (!frame)
goto err;
init_func_state(env, frame,
@@ -3180,7 +3180,7 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
return -EINVAL;
}

tab = kzalloc(sizeof(*tab), GFP_KERNEL);
tab = kzalloc(sizeof(*tab), GFP_KERNEL_ACCOUNT);
if (!tab)
return -ENOMEM;
prog_aux->kfunc_tab = tab;
@@ -3196,7 +3196,7 @@ static int add_kfunc_call(struct bpf_verifier_env *env, u32 func_id, s16 offset)
return 0;

if (!btf_tab && offset) {
btf_tab = kzalloc(sizeof(*btf_tab), GFP_KERNEL);
btf_tab = kzalloc(sizeof(*btf_tab), GFP_KERNEL_ACCOUNT);
if (!btf_tab)
return -ENOMEM;
prog_aux->kfunc_btf_tab = btf_tab;
@@ -10292,7 +10292,7 @@ static int setup_func_entry(struct bpf_verifier_env *env, int subprog, int calls
}

caller = state->frame[state->curframe];
callee = kzalloc(sizeof(*callee), GFP_KERNEL);
callee = kzalloc(sizeof(*callee), GFP_KERNEL_ACCOUNT);
if (!callee)
return -ENOMEM;
state->frame[state->curframe + 1] = callee;
@@ -17629,17 +17629,18 @@ static int check_cfg(struct bpf_verifier_env *env)
int *insn_stack, *insn_state, *insn_postorder;
int ex_insn_beg, i, ret = 0;

insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
insn_state = env->cfg.insn_state = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL_ACCOUNT);
if (!insn_state)
return -ENOMEM;

insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
insn_stack = env->cfg.insn_stack = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL_ACCOUNT);
if (!insn_stack) {
kvfree(insn_state);
return -ENOMEM;
}

insn_postorder = env->cfg.insn_postorder = kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL);
insn_postorder = env->cfg.insn_postorder =
kvcalloc(insn_cnt, sizeof(int), GFP_KERNEL_ACCOUNT);
if (!insn_postorder) {
kvfree(insn_state);
kvfree(insn_stack);
@@ -17773,7 +17774,7 @@ static int check_btf_func_early(struct bpf_verifier_env *env,
urecord = make_bpfptr(attr->func_info, uattr.is_kernel);
min_size = min_t(u32, krec_size, urec_size);

krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL | __GFP_NOWARN);
krecord = kvcalloc(nfuncs, krec_size, GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (!krecord)
return -ENOMEM;

@@ -17873,7 +17874,7 @@ static int check_btf_func(struct bpf_verifier_env *env,
urecord = make_bpfptr(attr->func_info, uattr.is_kernel);

krecord = prog->aux->func_info;
info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL | __GFP_NOWARN);
info_aux = kcalloc(nfuncs, sizeof(*info_aux), GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (!info_aux)
return -ENOMEM;

@@ -17959,7 +17960,7 @@ static int check_btf_line(struct bpf_verifier_env *env,
* pass in a smaller bpf_line_info object.
*/
linfo = kvcalloc(nr_linfo, sizeof(struct bpf_line_info),
GFP_KERNEL | __GFP_NOWARN);
GFP_KERNEL_ACCOUNT | __GFP_NOWARN);
if (!linfo)
return -ENOMEM;

@@ -19306,7 +19307,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
* When looping the sl->state.branches will be > 0 and this state
* will not be considered for equivalence until branches == 0.
*/
new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL);
new_sl = kzalloc(sizeof(struct bpf_verifier_state_list), GFP_KERNEL_ACCOUNT);
if (!new_sl)
return -ENOMEM;
env->total_states++;
@@ -22795,13 +22796,13 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog)
env->prev_linfo = NULL;
env->pass_cnt++;

state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL);
state = kzalloc(sizeof(struct bpf_verifier_state), GFP_KERNEL_ACCOUNT);
if (!state)
return -ENOMEM;
state->curframe = 0;
state->speculative = false;
state->branches = 1;
state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL_ACCOUNT);
if (!state->frame[0]) {
kfree(state);
return -ENOMEM;
@@ -23027,7 +23028,7 @@ static void print_verification_stats(struct bpf_verifier_env *env)
int bpf_prog_ctx_arg_info_init(struct bpf_prog *prog,
const struct bpf_ctx_arg_aux *info, u32 cnt)
{
prog->aux->ctx_arg_info = kmemdup_array(info, cnt, sizeof(*info), GFP_KERNEL);
prog->aux->ctx_arg_info = kmemdup_array(info, cnt, sizeof(*info), GFP_KERNEL_ACCOUNT);
prog->aux->ctx_arg_info_size = cnt;

return prog->aux->ctx_arg_info ? 0 : -ENOMEM;
@@ -23971,7 +23972,7 @@ static int compute_live_registers(struct bpf_verifier_env *env)
* - repeat the computation while {in,out} fields changes for
* any instruction.
*/
state = kvcalloc(insn_cnt, sizeof(*state), GFP_KERNEL);
state = kvcalloc(insn_cnt, sizeof(*state), GFP_KERNEL_ACCOUNT);
if (!state) {
err = -ENOMEM;
goto out;
@@ -24045,7 +24046,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
/* 'struct bpf_verifier_env' can be global, but since it's not small,
* allocate/free it every time bpf_check() is called
*/
env = kvzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL);
env = kvzalloc(sizeof(struct bpf_verifier_env), GFP_KERNEL_ACCOUNT);
if (!env)
return -ENOMEM;

@@ -24235,7 +24236,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
/* if program passed verifier, update used_maps in bpf_prog_info */
env->prog->aux->used_maps = kmalloc_array(env->used_map_cnt,
sizeof(env->used_maps[0]),
GFP_KERNEL);
GFP_KERNEL_ACCOUNT);

if (!env->prog->aux->used_maps) {
ret = -ENOMEM;
@@ -24250,7 +24251,7 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
/* if program passed verifier, update used_btfs in bpf_prog_aux */
env->prog->aux->used_btfs = kmalloc_array(env->used_btf_cnt,
sizeof(env->used_btfs[0]),
GFP_KERNEL);
GFP_KERNEL_ACCOUNT);
if (!env->prog->aux->used_btfs) {
ret = -ENOMEM;
goto err_release_maps;