|
|
|
|
@@ -710,6 +710,7 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
|
|
|
|
|
free_func_state(dst_state->frame[i]);
|
|
|
|
|
dst_state->frame[i] = NULL;
|
|
|
|
|
}
|
|
|
|
|
dst_state->speculative = src->speculative;
|
|
|
|
|
dst_state->curframe = src->curframe;
|
|
|
|
|
for (i = 0; i <= src->curframe; i++) {
|
|
|
|
|
dst = dst_state->frame[i];
|
|
|
|
|
@@ -754,7 +755,8 @@ static int pop_stack(struct bpf_verifier_env *env, int *prev_insn_idx,
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
|
|
|
|
|
int insn_idx, int prev_insn_idx)
|
|
|
|
|
int insn_idx, int prev_insn_idx,
|
|
|
|
|
bool speculative)
|
|
|
|
|
{
|
|
|
|
|
struct bpf_verifier_state *cur = env->cur_state;
|
|
|
|
|
struct bpf_verifier_stack_elem *elem;
|
|
|
|
|
@@ -772,6 +774,7 @@ static struct bpf_verifier_state *push_stack(struct bpf_verifier_env *env,
|
|
|
|
|
err = copy_verifier_state(&elem->st, cur);
|
|
|
|
|
if (err)
|
|
|
|
|
goto err;
|
|
|
|
|
elem->st.speculative |= speculative;
|
|
|
|
|
if (env->stack_size > BPF_COMPLEXITY_LIMIT_STACK) {
|
|
|
|
|
verbose(env, "BPF program is too complex\n");
|
|
|
|
|
goto err;
|
|
|
|
|
@@ -1387,6 +1390,31 @@ static int check_stack_read(struct bpf_verifier_env *env,
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static int check_stack_access(struct bpf_verifier_env *env,
|
|
|
|
|
const struct bpf_reg_state *reg,
|
|
|
|
|
int off, int size)
|
|
|
|
|
{
|
|
|
|
|
/* Stack accesses must be at a fixed offset, so that we
|
|
|
|
|
* can determine what type of data were returned. See
|
|
|
|
|
* check_stack_read().
|
|
|
|
|
*/
|
|
|
|
|
if (!tnum_is_const(reg->var_off)) {
|
|
|
|
|
char tn_buf[48];
|
|
|
|
|
|
|
|
|
|
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
|
|
|
|
|
verbose(env, "variable stack access var_off=%s off=%d size=%d",
|
|
|
|
|
tn_buf, off, size);
|
|
|
|
|
return -EACCES;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (off >= 0 || off < -MAX_BPF_STACK) {
|
|
|
|
|
verbose(env, "invalid stack off=%d size=%d\n", off, size);
|
|
|
|
|
return -EACCES;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* check read/write into map element returned by bpf_map_lookup_elem() */
|
|
|
|
|
static int __check_map_access(struct bpf_verifier_env *env, u32 regno, int off,
|
|
|
|
|
int size, bool zero_size_allowed)
|
|
|
|
|
@@ -1418,13 +1446,17 @@ static int check_map_access(struct bpf_verifier_env *env, u32 regno,
|
|
|
|
|
*/
|
|
|
|
|
if (env->log.level)
|
|
|
|
|
print_verifier_state(env, state);
|
|
|
|
|
|
|
|
|
|
/* The minimum value is only important with signed
|
|
|
|
|
* comparisons where we can't assume the floor of a
|
|
|
|
|
* value is 0. If we are using signed variables for our
|
|
|
|
|
* index'es we need to make sure that whatever we use
|
|
|
|
|
* will have a set floor within our range.
|
|
|
|
|
*/
|
|
|
|
|
if (reg->smin_value < 0) {
|
|
|
|
|
if (reg->smin_value < 0 &&
|
|
|
|
|
(reg->smin_value == S64_MIN ||
|
|
|
|
|
(off + reg->smin_value != (s64)(s32)(off + reg->smin_value)) ||
|
|
|
|
|
reg->smin_value + off < 0)) {
|
|
|
|
|
verbose(env, "R%d min value is negative, either use unsigned index or do a if (index >=0) check.\n",
|
|
|
|
|
regno);
|
|
|
|
|
return -EACCES;
|
|
|
|
|
@@ -1954,24 +1986,10 @@ static int check_mem_access(struct bpf_verifier_env *env, int insn_idx, u32 regn
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
} else if (reg->type == PTR_TO_STACK) {
|
|
|
|
|
/* stack accesses must be at a fixed offset, so that we can
|
|
|
|
|
* determine what type of data were returned.
|
|
|
|
|
* See check_stack_read().
|
|
|
|
|
*/
|
|
|
|
|
if (!tnum_is_const(reg->var_off)) {
|
|
|
|
|
char tn_buf[48];
|
|
|
|
|
|
|
|
|
|
tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
|
|
|
|
|
verbose(env, "variable stack access var_off=%s off=%d size=%d",
|
|
|
|
|
tn_buf, off, size);
|
|
|
|
|
return -EACCES;
|
|
|
|
|
}
|
|
|
|
|
off += reg->var_off.value;
|
|
|
|
|
if (off >= 0 || off < -MAX_BPF_STACK) {
|
|
|
|
|
verbose(env, "invalid stack off=%d size=%d\n", off,
|
|
|
|
|
size);
|
|
|
|
|
return -EACCES;
|
|
|
|
|
}
|
|
|
|
|
err = check_stack_access(env, reg, off, size);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
|
|
|
|
|
state = func(env, reg);
|
|
|
|
|
err = update_stack_depth(env, state, off);
|
|
|
|
|
@@ -3052,6 +3070,102 @@ static bool check_reg_sane_offset(struct bpf_verifier_env *env,
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
|
|
|
|
|
{
|
|
|
|
|
return &env->insn_aux_data[env->insn_idx];
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* Compute the masking limit for a pointer-ALU operation, i.e. the
 * largest scalar that may legitimately be added to (or subtracted
 * from) ptr_reg without leaving the underlying object. The limit is
 * consumed by the ALU-sanitation rewrite (see sanitize_ptr_alu() and
 * the corresponding fixup in fixup_bpf_calls()).
 *
 * @ptr_reg:    the pointer operand of the ALU instruction
 * @ptr_limit:  out parameter receiving the computed limit
 * @opcode:     BPF_ADD or BPF_SUB
 * @off_is_neg: true when the scalar offset's smin_value is negative
 *
 * Returns 0 on success, -EINVAL for pointer types that cannot be
 * sanitized this way.
 */
static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
			      u32 *ptr_limit, u8 opcode, bool off_is_neg)
{
	/* Adding a negative offset or subtracting a positive one moves
	 * the pointer towards lower addresses, so the mask must bound
	 * movement to the left; otherwise it bounds movement to the
	 * right (towards the end of the object).
	 */
	bool mask_to_left = (opcode == BPF_ADD && off_is_neg) ||
			    (opcode == BPF_SUB && !off_is_neg);
	u32 off;

	switch (ptr_reg->type) {
	case PTR_TO_STACK:
		/* Current position relative to the frame pointer; the
		 * stack lives in [-MAX_BPF_STACK, 0), so off is
		 * expected to be negative here.
		 */
		off = ptr_reg->off + ptr_reg->var_off.value;
		if (mask_to_left)
			/* Distance down to the bottom of the stack frame. */
			*ptr_limit = MAX_BPF_STACK + off;
		else
			/* Distance up to the frame pointer (off < 0). */
			*ptr_limit = -off;
		return 0;
	case PTR_TO_MAP_VALUE:
		if (mask_to_left) {
			/* Worst-case distance back to the start of the
			 * map value.
			 */
			*ptr_limit = ptr_reg->umax_value + ptr_reg->off;
		} else {
			/* Remaining room up to the end of the map value,
			 * measured from the smallest possible position.
			 */
			off = ptr_reg->smin_value + ptr_reg->off;
			*ptr_limit = ptr_reg->map_ptr->value_size - off;
		}
		return 0;
	default:
		/* Other pointer types are not sanitized via masking. */
		return -EINVAL;
	}
}
|
|
|
|
|
|
|
|
|
|
/* Sanitize register-based pointer arithmetic against speculative
 * (Spectre v1 style) out-of-bounds offsets.
 *
 * Two things happen here for unprivileged programs:
 *  1. On the non-speculative path, record in the instruction's aux
 *     data the alu_state/alu_limit needed so that fixup_bpf_calls()
 *     can later rewrite the instruction to mask the scalar offset at
 *     runtime.
 *  2. Push a speculative verifier state (push_stack(..., true)) that
 *     simulates the truncated/masked result, so the verifier also
 *     proves the masked path cannot perform a bad access.
 *
 * @env:        verifier environment
 * @insn:       the ALU instruction being verified (BPF_ADD/BPF_SUB)
 * @ptr_reg:    the pointer operand
 * @dst_reg:    the destination register of the instruction
 * @off_is_neg: true when the scalar offset's smin_value is negative
 *
 * Returns 0 on success (including the privileged/immediate cases where
 * no sanitation is needed), -EACCES when conflicting limits from
 * different paths make masking impossible, -EFAULT if the speculative
 * state cannot be pushed.
 */
static int sanitize_ptr_alu(struct bpf_verifier_env *env,
			    struct bpf_insn *insn,
			    const struct bpf_reg_state *ptr_reg,
			    struct bpf_reg_state *dst_reg,
			    bool off_is_neg)
{
	struct bpf_verifier_state *vstate = env->cur_state;
	struct bpf_insn_aux_data *aux = cur_aux(env);
	/* Whether the pointer sits in dst (ptr += scalar) rather than
	 * in src (dst = scalar; dst += ptr style).
	 */
	bool ptr_is_dst_reg = ptr_reg == dst_reg;
	u8 opcode = BPF_OP(insn->code);
	u32 alu_state, alu_limit;
	struct bpf_reg_state tmp;
	bool ret;

	/* Privileged programs, and immediate (BPF_K) offsets whose
	 * value is statically known, need no runtime masking.
	 */
	if (env->allow_ptr_leaks || BPF_SRC(insn->code) == BPF_K)
		return 0;

	/* We already marked aux for masking from non-speculative
	 * paths, thus we got here in the first place. We only care
	 * to explore bad access from here.
	 */
	if (vstate->speculative)
		goto do_sim;

	/* Encode how fixup_bpf_calls() must rewrite the instruction:
	 * whether the offset is negative and which operand (src/dst)
	 * holds the scalar to be masked.
	 */
	alu_state = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
	alu_state |= ptr_is_dst_reg ?
		     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;

	/* No limit computable for this pointer type: nothing to mask. */
	if (retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg))
		return 0;

	/* If we arrived here from different branches with different
	 * limits to sanitize, then this won't work.
	 */
	if (aux->alu_state &&
	    (aux->alu_state != alu_state ||
	     aux->alu_limit != alu_limit))
		return -EACCES;

	/* Corresponding fixup done in fixup_bpf_calls(). */
	aux->alu_state = alu_state;
	aux->alu_limit = alu_limit;

do_sim:
	/* Simulate and find potential out-of-bounds access under
	 * speculative execution from truncation as a result of
	 * masking when off was not within expected range. If off
	 * sits in dst, then we temporarily need to move ptr there
	 * to simulate dst (== 0) +/-= ptr. Needed, for example,
	 * for cases where we use K-based arithmetic in one direction
	 * and truncated reg-based in the other in order to explore
	 * bad access.
	 */
	if (!ptr_is_dst_reg) {
		tmp = *dst_reg;
		*dst_reg = *ptr_reg;
	}
	/* Branch off a speculative state at the next instruction. */
	ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
	if (!ptr_is_dst_reg)
		*dst_reg = tmp;
	/* push_stack() returns NULL on failure (converted to bool). */
	return !ret ? -EFAULT : 0;
}
|
|
|
|
|
|
|
|
|
|
/* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
|
|
|
|
|
* Caller should also handle BPF_MOV case separately.
|
|
|
|
|
* If we return -EACCES, caller may want to try again treating pointer as a
|
|
|
|
|
@@ -3070,8 +3184,9 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
|
|
|
|
|
smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
|
|
|
|
|
u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
|
|
|
|
|
umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
|
|
|
|
|
u32 dst = insn->dst_reg, src = insn->src_reg;
|
|
|
|
|
u8 opcode = BPF_OP(insn->code);
|
|
|
|
|
u32 dst = insn->dst_reg;
|
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
|
|
dst_reg = ®s[dst];
|
|
|
|
|
|
|
|
|
|
@@ -3104,6 +3219,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
|
|
|
|
|
verbose(env, "R%d pointer arithmetic on %s prohibited\n",
|
|
|
|
|
dst, reg_type_str[ptr_reg->type]);
|
|
|
|
|
return -EACCES;
|
|
|
|
|
case PTR_TO_MAP_VALUE:
|
|
|
|
|
if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
|
|
|
|
|
verbose(env, "R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
|
|
|
|
|
off_reg == dst_reg ? dst : src);
|
|
|
|
|
return -EACCES;
|
|
|
|
|
}
|
|
|
|
|
/* fall-through */
|
|
|
|
|
default:
|
|
|
|
|
break;
|
|
|
|
|
}
|
|
|
|
|
@@ -3120,6 +3242,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
|
|
|
|
|
|
|
|
|
|
switch (opcode) {
|
|
|
|
|
case BPF_ADD:
|
|
|
|
|
ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
|
|
|
|
|
if (ret < 0) {
|
|
|
|
|
verbose(env, "R%d tried to add from different maps or paths\n", dst);
|
|
|
|
|
return ret;
|
|
|
|
|
}
|
|
|
|
|
/* We can take a fixed offset as long as it doesn't overflow
|
|
|
|
|
* the s32 'off' field
|
|
|
|
|
*/
|
|
|
|
|
@@ -3170,6 +3297,11 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
|
|
|
|
|
}
|
|
|
|
|
break;
|
|
|
|
|
case BPF_SUB:
|
|
|
|
|
ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
|
|
|
|
|
if (ret < 0) {
|
|
|
|
|
verbose(env, "R%d tried to sub from different maps or paths\n", dst);
|
|
|
|
|
return ret;
|
|
|
|
|
}
|
|
|
|
|
if (dst_reg == off_reg) {
|
|
|
|
|
/* scalar -= pointer. Creates an unknown scalar */
|
|
|
|
|
verbose(env, "R%d tried to subtract pointer from scalar\n",
|
|
|
|
|
@@ -3249,6 +3381,25 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
|
|
|
|
|
__update_reg_bounds(dst_reg);
|
|
|
|
|
__reg_deduce_bounds(dst_reg);
|
|
|
|
|
__reg_bound_offset(dst_reg);
|
|
|
|
|
|
|
|
|
|
/* For unprivileged we require that resulting offset must be in bounds
|
|
|
|
|
* in order to be able to sanitize access later on.
|
|
|
|
|
*/
|
|
|
|
|
if (!env->allow_ptr_leaks) {
|
|
|
|
|
if (dst_reg->type == PTR_TO_MAP_VALUE &&
|
|
|
|
|
check_map_access(env, dst, dst_reg->off, 1, false)) {
|
|
|
|
|
verbose(env, "R%d pointer arithmetic of map value goes out of range, "
|
|
|
|
|
"prohibited for !root\n", dst);
|
|
|
|
|
return -EACCES;
|
|
|
|
|
} else if (dst_reg->type == PTR_TO_STACK &&
|
|
|
|
|
check_stack_access(env, dst_reg, dst_reg->off +
|
|
|
|
|
dst_reg->var_off.value, 1)) {
|
|
|
|
|
verbose(env, "R%d stack pointer arithmetic goes out of range, "
|
|
|
|
|
"prohibited for !root\n", dst);
|
|
|
|
|
return -EACCES;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
@@ -4348,7 +4499,8 @@ static int check_cond_jmp_op(struct bpf_verifier_env *env,
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx);
|
|
|
|
|
other_branch = push_stack(env, *insn_idx + insn->off + 1, *insn_idx,
|
|
|
|
|
false);
|
|
|
|
|
if (!other_branch)
|
|
|
|
|
return -EFAULT;
|
|
|
|
|
other_branch_regs = other_branch->frame[other_branch->curframe]->regs;
|
|
|
|
|
@@ -5458,6 +5610,12 @@ static bool states_equal(struct bpf_verifier_env *env,
|
|
|
|
|
if (old->curframe != cur->curframe)
|
|
|
|
|
return false;
|
|
|
|
|
|
|
|
|
|
/* Verification state from speculative execution simulation
|
|
|
|
|
* must never prune a non-speculative execution one.
|
|
|
|
|
*/
|
|
|
|
|
if (old->speculative && !cur->speculative)
|
|
|
|
|
return false;
|
|
|
|
|
|
|
|
|
|
/* for states to be equal callsites have to be the same
|
|
|
|
|
* and all frame states need to be equivalent
|
|
|
|
|
*/
|
|
|
|
|
@@ -5650,7 +5808,6 @@ static int do_check(struct bpf_verifier_env *env)
|
|
|
|
|
struct bpf_insn *insns = env->prog->insnsi;
|
|
|
|
|
struct bpf_reg_state *regs;
|
|
|
|
|
int insn_cnt = env->prog->len, i;
|
|
|
|
|
int insn_idx, prev_insn_idx = 0;
|
|
|
|
|
int insn_processed = 0;
|
|
|
|
|
bool do_print_state = false;
|
|
|
|
|
|
|
|
|
|
@@ -5660,6 +5817,7 @@ static int do_check(struct bpf_verifier_env *env)
|
|
|
|
|
if (!state)
|
|
|
|
|
return -ENOMEM;
|
|
|
|
|
state->curframe = 0;
|
|
|
|
|
state->speculative = false;
|
|
|
|
|
state->frame[0] = kzalloc(sizeof(struct bpf_func_state), GFP_KERNEL);
|
|
|
|
|
if (!state->frame[0]) {
|
|
|
|
|
kfree(state);
|
|
|
|
|
@@ -5670,19 +5828,19 @@ static int do_check(struct bpf_verifier_env *env)
|
|
|
|
|
BPF_MAIN_FUNC /* callsite */,
|
|
|
|
|
0 /* frameno */,
|
|
|
|
|
0 /* subprogno, zero == main subprog */);
|
|
|
|
|
insn_idx = 0;
|
|
|
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
|
struct bpf_insn *insn;
|
|
|
|
|
u8 class;
|
|
|
|
|
int err;
|
|
|
|
|
|
|
|
|
|
if (insn_idx >= insn_cnt) {
|
|
|
|
|
if (env->insn_idx >= insn_cnt) {
|
|
|
|
|
verbose(env, "invalid insn idx %d insn_cnt %d\n",
|
|
|
|
|
insn_idx, insn_cnt);
|
|
|
|
|
env->insn_idx, insn_cnt);
|
|
|
|
|
return -EFAULT;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
insn = &insns[insn_idx];
|
|
|
|
|
insn = &insns[env->insn_idx];
|
|
|
|
|
class = BPF_CLASS(insn->code);
|
|
|
|
|
|
|
|
|
|
if (++insn_processed > BPF_COMPLEXITY_LIMIT_INSNS) {
|
|
|
|
|
@@ -5692,17 +5850,19 @@ static int do_check(struct bpf_verifier_env *env)
|
|
|
|
|
return -E2BIG;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
err = is_state_visited(env, insn_idx);
|
|
|
|
|
err = is_state_visited(env, env->insn_idx);
|
|
|
|
|
if (err < 0)
|
|
|
|
|
return err;
|
|
|
|
|
if (err == 1) {
|
|
|
|
|
/* found equivalent state, can prune the search */
|
|
|
|
|
if (env->log.level) {
|
|
|
|
|
if (do_print_state)
|
|
|
|
|
verbose(env, "\nfrom %d to %d: safe\n",
|
|
|
|
|
prev_insn_idx, insn_idx);
|
|
|
|
|
verbose(env, "\nfrom %d to %d%s: safe\n",
|
|
|
|
|
env->prev_insn_idx, env->insn_idx,
|
|
|
|
|
env->cur_state->speculative ?
|
|
|
|
|
" (speculative execution)" : "");
|
|
|
|
|
else
|
|
|
|
|
verbose(env, "%d: safe\n", insn_idx);
|
|
|
|
|
verbose(env, "%d: safe\n", env->insn_idx);
|
|
|
|
|
}
|
|
|
|
|
goto process_bpf_exit;
|
|
|
|
|
}
|
|
|
|
|
@@ -5715,10 +5875,12 @@ static int do_check(struct bpf_verifier_env *env)
|
|
|
|
|
|
|
|
|
|
if (env->log.level > 1 || (env->log.level && do_print_state)) {
|
|
|
|
|
if (env->log.level > 1)
|
|
|
|
|
verbose(env, "%d:", insn_idx);
|
|
|
|
|
verbose(env, "%d:", env->insn_idx);
|
|
|
|
|
else
|
|
|
|
|
verbose(env, "\nfrom %d to %d:",
|
|
|
|
|
prev_insn_idx, insn_idx);
|
|
|
|
|
verbose(env, "\nfrom %d to %d%s:",
|
|
|
|
|
env->prev_insn_idx, env->insn_idx,
|
|
|
|
|
env->cur_state->speculative ?
|
|
|
|
|
" (speculative execution)" : "");
|
|
|
|
|
print_verifier_state(env, state->frame[state->curframe]);
|
|
|
|
|
do_print_state = false;
|
|
|
|
|
}
|
|
|
|
|
@@ -5729,20 +5891,20 @@ static int do_check(struct bpf_verifier_env *env)
|
|
|
|
|
.private_data = env,
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
verbose_linfo(env, insn_idx, "; ");
|
|
|
|
|
verbose(env, "%d: ", insn_idx);
|
|
|
|
|
verbose_linfo(env, env->insn_idx, "; ");
|
|
|
|
|
verbose(env, "%d: ", env->insn_idx);
|
|
|
|
|
print_bpf_insn(&cbs, insn, env->allow_ptr_leaks);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (bpf_prog_is_dev_bound(env->prog->aux)) {
|
|
|
|
|
err = bpf_prog_offload_verify_insn(env, insn_idx,
|
|
|
|
|
prev_insn_idx);
|
|
|
|
|
err = bpf_prog_offload_verify_insn(env, env->insn_idx,
|
|
|
|
|
env->prev_insn_idx);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
regs = cur_regs(env);
|
|
|
|
|
env->insn_aux_data[insn_idx].seen = true;
|
|
|
|
|
env->insn_aux_data[env->insn_idx].seen = true;
|
|
|
|
|
|
|
|
|
|
if (class == BPF_ALU || class == BPF_ALU64) {
|
|
|
|
|
err = check_alu_op(env, insn);
|
|
|
|
|
@@ -5768,13 +5930,13 @@ static int do_check(struct bpf_verifier_env *env)
|
|
|
|
|
/* check that memory (src_reg + off) is readable,
|
|
|
|
|
* the state of dst_reg will be updated by this func
|
|
|
|
|
*/
|
|
|
|
|
err = check_mem_access(env, insn_idx, insn->src_reg, insn->off,
|
|
|
|
|
BPF_SIZE(insn->code), BPF_READ,
|
|
|
|
|
insn->dst_reg, false);
|
|
|
|
|
err = check_mem_access(env, env->insn_idx, insn->src_reg,
|
|
|
|
|
insn->off, BPF_SIZE(insn->code),
|
|
|
|
|
BPF_READ, insn->dst_reg, false);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
|
|
|
|
|
prev_src_type = &env->insn_aux_data[insn_idx].ptr_type;
|
|
|
|
|
prev_src_type = &env->insn_aux_data[env->insn_idx].ptr_type;
|
|
|
|
|
|
|
|
|
|
if (*prev_src_type == NOT_INIT) {
|
|
|
|
|
/* saw a valid insn
|
|
|
|
|
@@ -5799,10 +5961,10 @@ static int do_check(struct bpf_verifier_env *env)
|
|
|
|
|
enum bpf_reg_type *prev_dst_type, dst_reg_type;
|
|
|
|
|
|
|
|
|
|
if (BPF_MODE(insn->code) == BPF_XADD) {
|
|
|
|
|
err = check_xadd(env, insn_idx, insn);
|
|
|
|
|
err = check_xadd(env, env->insn_idx, insn);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
insn_idx++;
|
|
|
|
|
env->insn_idx++;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
@@ -5818,13 +5980,13 @@ static int do_check(struct bpf_verifier_env *env)
|
|
|
|
|
dst_reg_type = regs[insn->dst_reg].type;
|
|
|
|
|
|
|
|
|
|
/* check that memory (dst_reg + off) is writeable */
|
|
|
|
|
err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
|
|
|
|
|
BPF_SIZE(insn->code), BPF_WRITE,
|
|
|
|
|
insn->src_reg, false);
|
|
|
|
|
err = check_mem_access(env, env->insn_idx, insn->dst_reg,
|
|
|
|
|
insn->off, BPF_SIZE(insn->code),
|
|
|
|
|
BPF_WRITE, insn->src_reg, false);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
|
|
|
|
|
prev_dst_type = &env->insn_aux_data[insn_idx].ptr_type;
|
|
|
|
|
prev_dst_type = &env->insn_aux_data[env->insn_idx].ptr_type;
|
|
|
|
|
|
|
|
|
|
if (*prev_dst_type == NOT_INIT) {
|
|
|
|
|
*prev_dst_type = dst_reg_type;
|
|
|
|
|
@@ -5852,9 +6014,9 @@ static int do_check(struct bpf_verifier_env *env)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/* check that memory (dst_reg + off) is writeable */
|
|
|
|
|
err = check_mem_access(env, insn_idx, insn->dst_reg, insn->off,
|
|
|
|
|
BPF_SIZE(insn->code), BPF_WRITE,
|
|
|
|
|
-1, false);
|
|
|
|
|
err = check_mem_access(env, env->insn_idx, insn->dst_reg,
|
|
|
|
|
insn->off, BPF_SIZE(insn->code),
|
|
|
|
|
BPF_WRITE, -1, false);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
|
|
|
|
|
@@ -5872,9 +6034,9 @@ static int do_check(struct bpf_verifier_env *env)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (insn->src_reg == BPF_PSEUDO_CALL)
|
|
|
|
|
err = check_func_call(env, insn, &insn_idx);
|
|
|
|
|
err = check_func_call(env, insn, &env->insn_idx);
|
|
|
|
|
else
|
|
|
|
|
err = check_helper_call(env, insn->imm, insn_idx);
|
|
|
|
|
err = check_helper_call(env, insn->imm, env->insn_idx);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
|
|
|
|
|
@@ -5887,7 +6049,7 @@ static int do_check(struct bpf_verifier_env *env)
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
insn_idx += insn->off + 1;
|
|
|
|
|
env->insn_idx += insn->off + 1;
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
} else if (opcode == BPF_EXIT) {
|
|
|
|
|
@@ -5901,8 +6063,8 @@ static int do_check(struct bpf_verifier_env *env)
|
|
|
|
|
|
|
|
|
|
if (state->curframe) {
|
|
|
|
|
/* exit from nested function */
|
|
|
|
|
prev_insn_idx = insn_idx;
|
|
|
|
|
err = prepare_func_exit(env, &insn_idx);
|
|
|
|
|
env->prev_insn_idx = env->insn_idx;
|
|
|
|
|
err = prepare_func_exit(env, &env->insn_idx);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
do_print_state = true;
|
|
|
|
|
@@ -5932,7 +6094,8 @@ static int do_check(struct bpf_verifier_env *env)
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
process_bpf_exit:
|
|
|
|
|
err = pop_stack(env, &prev_insn_idx, &insn_idx);
|
|
|
|
|
err = pop_stack(env, &env->prev_insn_idx,
|
|
|
|
|
&env->insn_idx);
|
|
|
|
|
if (err < 0) {
|
|
|
|
|
if (err != -ENOENT)
|
|
|
|
|
return err;
|
|
|
|
|
@@ -5942,7 +6105,7 @@ process_bpf_exit:
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
} else {
|
|
|
|
|
err = check_cond_jmp_op(env, insn, &insn_idx);
|
|
|
|
|
err = check_cond_jmp_op(env, insn, &env->insn_idx);
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
}
|
|
|
|
|
@@ -5959,8 +6122,8 @@ process_bpf_exit:
|
|
|
|
|
if (err)
|
|
|
|
|
return err;
|
|
|
|
|
|
|
|
|
|
insn_idx++;
|
|
|
|
|
env->insn_aux_data[insn_idx].seen = true;
|
|
|
|
|
env->insn_idx++;
|
|
|
|
|
env->insn_aux_data[env->insn_idx].seen = true;
|
|
|
|
|
} else {
|
|
|
|
|
verbose(env, "invalid BPF_LD mode\n");
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
@@ -5970,7 +6133,7 @@ process_bpf_exit:
|
|
|
|
|
return -EINVAL;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
insn_idx++;
|
|
|
|
|
env->insn_idx++;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
verbose(env, "processed %d insns (limit %d), stack depth ",
|
|
|
|
|
@@ -6709,6 +6872,57 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (insn->code == (BPF_ALU64 | BPF_ADD | BPF_X) ||
|
|
|
|
|
insn->code == (BPF_ALU64 | BPF_SUB | BPF_X)) {
|
|
|
|
|
const u8 code_add = BPF_ALU64 | BPF_ADD | BPF_X;
|
|
|
|
|
const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
|
|
|
|
|
struct bpf_insn insn_buf[16];
|
|
|
|
|
struct bpf_insn *patch = &insn_buf[0];
|
|
|
|
|
bool issrc, isneg;
|
|
|
|
|
u32 off_reg;
|
|
|
|
|
|
|
|
|
|
aux = &env->insn_aux_data[i + delta];
|
|
|
|
|
if (!aux->alu_state)
|
|
|
|
|
continue;
|
|
|
|
|
|
|
|
|
|
isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
|
|
|
|
|
issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
|
|
|
|
|
BPF_ALU_SANITIZE_SRC;
|
|
|
|
|
|
|
|
|
|
off_reg = issrc ? insn->src_reg : insn->dst_reg;
|
|
|
|
|
if (isneg)
|
|
|
|
|
*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
|
|
|
|
|
*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit - 1);
|
|
|
|
|
*patch++ = BPF_ALU64_REG(BPF_SUB, BPF_REG_AX, off_reg);
|
|
|
|
|
*patch++ = BPF_ALU64_REG(BPF_OR, BPF_REG_AX, off_reg);
|
|
|
|
|
*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
|
|
|
|
|
*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
|
|
|
|
|
if (issrc) {
|
|
|
|
|
*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX,
|
|
|
|
|
off_reg);
|
|
|
|
|
insn->src_reg = BPF_REG_AX;
|
|
|
|
|
} else {
|
|
|
|
|
*patch++ = BPF_ALU64_REG(BPF_AND, off_reg,
|
|
|
|
|
BPF_REG_AX);
|
|
|
|
|
}
|
|
|
|
|
if (isneg)
|
|
|
|
|
insn->code = insn->code == code_add ?
|
|
|
|
|
code_sub : code_add;
|
|
|
|
|
*patch++ = *insn;
|
|
|
|
|
if (issrc && isneg)
|
|
|
|
|
*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
|
|
|
|
|
cnt = patch - insn_buf;
|
|
|
|
|
|
|
|
|
|
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
|
|
|
|
|
if (!new_prog)
|
|
|
|
|
return -ENOMEM;
|
|
|
|
|
|
|
|
|
|
delta += cnt - 1;
|
|
|
|
|
env->prog = prog = new_prog;
|
|
|
|
|
insn = new_prog->insnsi + i + delta;
|
|
|
|
|
continue;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
if (insn->code != (BPF_JMP | BPF_CALL))
|
|
|
|
|
continue;
|
|
|
|
|
if (insn->src_reg == BPF_PSEUDO_CALL)
|
|
|
|
|
|