[lts92] bpf: Fix partial dynptr stack slot reads/writes #40

Merged
merged 1 commit on Jan 3, 2025

95 changes: 92 additions & 3 deletions kernel/bpf/verifier.c
@@ -695,6 +695,12 @@ static bool dynptr_type_refcounted(enum bpf_dynptr_type type)
return type == BPF_DYNPTR_TYPE_RINGBUF;
}

static void __mark_reg_not_init(const struct bpf_verifier_env *env,
struct bpf_reg_state *reg);

static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
struct bpf_func_state *state, int spi);

static int mark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
enum bpf_arg_type arg_type, int insn_idx)
{
@@ -762,6 +768,55 @@ static int unmark_stack_slots_dynptr(struct bpf_verifier_env *env, struct bpf_re
return 0;
}

static void __mark_reg_unknown(const struct bpf_verifier_env *env,
struct bpf_reg_state *reg);

static int destroy_if_dynptr_stack_slot(struct bpf_verifier_env *env,
struct bpf_func_state *state, int spi)
{
int i;

/* We always ensure that STACK_DYNPTR is never set partially,
* hence just checking for slot_type[0] is enough. This is
* different for STACK_SPILL, where it may be only set for
* 1 byte, so code has to use is_spilled_reg.
*/
if (state->stack[spi].slot_type[0] != STACK_DYNPTR)
return 0;

/* Reposition spi to first slot */
if (!state->stack[spi].spilled_ptr.dynptr.first_slot)
spi = spi + 1;

if (dynptr_type_refcounted(state->stack[spi].spilled_ptr.dynptr.type)) {
verbose(env, "cannot overwrite referenced dynptr\n");
return -EINVAL;
}

mark_stack_slot_scratched(env, spi);
mark_stack_slot_scratched(env, spi - 1);

/* Writing partially to one dynptr stack slot destroys both. */
for (i = 0; i < BPF_REG_SIZE; i++) {
state->stack[spi].slot_type[i] = STACK_INVALID;
state->stack[spi - 1].slot_type[i] = STACK_INVALID;
}

/* TODO: Invalidate any slices associated with this dynptr */

/* Do not release reference state, we are destroying dynptr on stack,
* not using some helper to release it. Just reset register.
*/
__mark_reg_not_init(env, &state->stack[spi].spilled_ptr);
__mark_reg_not_init(env, &state->stack[spi - 1].spilled_ptr);

/* Same reason as unmark_stack_slots_dynptr above */
state->stack[spi].spilled_ptr.live |= REG_LIVE_WRITTEN;
state->stack[spi - 1].spilled_ptr.live |= REG_LIVE_WRITTEN;

return 0;
}

static bool is_dynptr_reg_valid_uninit(struct bpf_verifier_env *env, struct bpf_reg_state *reg)
{
struct bpf_func_state *state = func(env, reg);
@@ -1300,9 +1355,6 @@ static const int caller_saved[CALLER_SAVED_REGS] = {
BPF_REG_0, BPF_REG_1, BPF_REG_2, BPF_REG_3, BPF_REG_4, BPF_REG_5
};

static void __mark_reg_not_init(const struct bpf_verifier_env *env,
struct bpf_reg_state *reg);

/* This helper doesn't clear reg->id */
static void ___mark_reg_known(struct bpf_reg_state *reg, u64 imm)
{
@@ -3052,6 +3104,10 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
env->insn_aux_data[insn_idx].sanitize_stack_spill = true;
}

err = destroy_if_dynptr_stack_slot(env, state, spi);
if (err)
return err;

mark_stack_slot_scratched(env, spi);
if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) &&
!register_is_null(reg) && env->bpf_capable) {
@@ -3165,6 +3221,14 @@ static int check_stack_write_var_off(struct bpf_verifier_env *env,
if (err)
return err;

for (i = min_off; i < max_off; i++) {
int spi;

spi = get_spi(i);
err = destroy_if_dynptr_stack_slot(env, state, spi);
if (err)
return err;
}

/* Variable offset writes destroy any spilled pointers in range. */
for (i = min_off; i < max_off; i++) {
@@ -5126,6 +5190,31 @@ static int check_stack_range_initialized(
}

if (meta && meta->raw_mode) {
/* Ensure we won't be overwriting dynptrs when simulating byte
* by byte access in check_helper_call using meta.access_size.
* This would be a problem if we have a helper in the future
* which takes:
*
* helper(uninit_mem, len, dynptr)
*
* Now, uninit_mem may overlap with dynptr pointer. Hence, it
* may end up writing to dynptr itself when touching memory from
* arg 1. This can be relaxed on a case by case basis for known
* safe cases, but reject due to the possibility of aliasing by
* default.
*/
for (i = min_off; i < max_off + access_size; i++) {
int stack_off = -i - 1;

spi = get_spi(i);
/* raw_mode may write past allocated_stack */
if (state->allocated_stack <= stack_off)
continue;
if (state->stack[spi].slot_type[stack_off % BPF_REG_SIZE] == STACK_DYNPTR) {
verbose(env, "potential write to dynptr at off=%d disallowed\n", i);
return -EACCES;
}
}
meta->access_size = access_size;
meta->regno = regno;
return 0;
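
For context, here is a minimal sketch of the kind of program the new destroy_if_dynptr_stack_slot() path is meant to catch: a referenced (ringbuf) dynptr whose stack slots get partially overwritten by an ordinary store. This is illustrative only, not part of the patch; the map name, section name and the 0xdeadbeef store are assumptions made up for the example, and only the verifier message quoted in the comments comes from the diff above.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} rb SEC(".maps");

SEC("tc")
int clobber_dynptr(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;
	char buf[8] = {};

	/* A ringbuf dynptr occupies two 8-byte stack slots and holds a
	 * reference on the reserved sample.
	 */
	bpf_ringbuf_reserve_dynptr(&rb, 8, 0, &ptr);

	/* Fixed-offset store that clobbers only the first of the two slots.
	 * It goes through check_stack_write_fixed_off() and now hits
	 * destroy_if_dynptr_stack_slot(), which rejects the program with
	 * "cannot overwrite referenced dynptr" because this dynptr is
	 * refcounted.
	 */
	*(__u64 *)&ptr = 0xdeadbeef;

	/* Before the fix, the half-clobbered dynptr could still be passed to
	 * dynptr helpers here.
	 */
	bpf_dynptr_read(buf, sizeof(buf), &ptr, 0, 0);

	bpf_ringbuf_discard_dynptr(&ptr, 0);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";

For a non-refcounted (local) dynptr the same store is allowed, but both slots become STACK_INVALID and the spilled registers are reset, so any later dynptr use fails instead of operating on a half-overwritten object.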
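
The check_stack_write_var_off() hunk covers the same clobbering when the store's stack offset is not a compile-time constant. A rough sketch, reusing the includes from the previous example; deriving the unknown index from skb->len is just an assumption to force a variable offset:

char scratch[16];			/* global buffer backing a local dynptr */

SEC("tc")
int var_off_clobber(struct __sk_buff *skb)
{
	struct bpf_dynptr ptr;		/* the dynptr itself lives on the stack */
	__u32 idx = skb->len & 7;	/* value unknown to the verifier */

	bpf_dynptr_from_mem(scratch, sizeof(scratch), 0, &ptr);

	/* Variable-offset store that may or may not land on the slots holding
	 * 'ptr'. The new loop over get_spi(i) in check_stack_write_var_off()
	 * calls destroy_if_dynptr_stack_slot() for every slot in range, so the
	 * dynptr is invalidated rather than surviving half-overwritten.
	 */
	((char *)&ptr)[idx] = 0;

	return 0;	/* any later use of 'ptr' as a dynptr is now rejected */
}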
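
The last hunk, in check_stack_range_initialized(), is about helpers rather than direct stores: an argument taken in raw_mode (ARG_PTR_TO_UNINIT_MEM) lets the helper write into caller stack, so pointing such an argument at a dynptr's slots has to be refused. A sketch of that overlap, again hypothetical and reusing the scratch buffer from the sketch above (the kprobe target and the source address are arbitrary); the quoted error string is the one added in the diff:

SEC("kprobe/do_nanosleep")
int helper_clobbers_dynptr(void *ctx)
{
	struct bpf_dynptr ptr;

	bpf_dynptr_from_mem(scratch, sizeof(scratch), 0, &ptr);

	/* bpf_probe_read_kernel()'s destination is uninitialized stack memory
	 * that the helper writes to (raw_mode). Aiming it at the dynptr's own
	 * slots is the aliasing case the new loop detects and rejects with
	 * "potential write to dynptr at off=%d disallowed".
	 */
	bpf_probe_read_kernel(&ptr, sizeof(__u64), ctx);

	return 0;
}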