
Commit 3713b58

Author: Alexei Starovoitov (committed)

Merge branch 'bpf-verifier-improve-precision-of-bpf_add-and-bpf_sub'
Harishankar Vishwanathan says:

====================
bpf, verifier: Improve precision of BPF_ADD and BPF_SUB

This patchset improves the precision of BPF_ADD and BPF_SUB range
tracking. It also adds selftests that exercise the cases where precision
improvement occurs, and selftests for the cases where precise bounds
cannot be computed and the output register state values are set to
unbounded.

Changelog:

v3:
 * Improve readability in selftests and commit message by using more
   readable constants (suggested by Eduard Zingerman).
 * Add four new selftests for the cases where precise output register
   state bounds cannot be computed in scalar(32)_min_max_add/sub, so the
   output register state must be set to unbounded, i.e., [0, U64_MAX]
   or [0, U32_MAX].
 * Add suggested-by Eduard tag to commit message for changes to
   verifier_bounds.c.

v2:
 * Add clearer example of precision improvement in the commit message
   for verifier.c changes.
 * Add selftests that exercise the precision improvement to
   verifier_bounds.c (suggested by Eduard Zingerman).

v1: https://lore.kernel.org/bpf/[email protected]/
====================

Link: https://patch.msgid.link/[email protected]
Signed-off-by: Alexei Starovoitov <[email protected]>
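As an editor's illustration (not part of the patch), the sketch below mirrors the new unsigned-addition rule from the verifier.c change further down in plain user-space C, with __builtin_add_overflow() standing in for the kernel's check_add_overflow(); the example bounds loosely follow the add64 selftests added below.

/* Illustrative sketch only: mirrors the new unsigned-add bounds rule
 * outside the kernel. Not the kernel code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void add_u64_bounds(uint64_t dst_umin, uint64_t dst_umax,
                           uint64_t src_umin, uint64_t src_umax,
                           uint64_t *out_umin, uint64_t *out_umax)
{
        bool min_overflow = __builtin_add_overflow(dst_umin, src_umin, out_umin);
        bool max_overflow = __builtin_add_overflow(dst_umax, src_umax, out_umax);

        /* Bounds stay precise when both or neither addition wraps; a
         * partial overflow means the true range wraps around, so fall
         * back to the unbounded range [0, U64_MAX].
         */
        if (!min_overflow && max_overflow) {
                *out_umin = 0;
                *out_umax = UINT64_MAX;
        }
}

int main(void)
{
        uint64_t lo, hi;

        /* Both boundary additions wrap: the wrapped bounds remain valid. */
        add_u64_bounds(0xa000000000000000ULL, UINT64_MAX,
                       0xa000000000000000ULL, UINT64_MAX, &lo, &hi);
        printf("all overflow:     [0x%llx, 0x%llx]\n",
               (unsigned long long)lo, (unsigned long long)hi);

        /* Only the max addition wraps: bounds must become unbounded. */
        add_u64_bounds(2, UINT64_MAX, 2, UINT64_MAX, &lo, &hi);
        printf("partial overflow: [0x%llx, 0x%llx]\n",
               (unsigned long long)lo, (unsigned long long)hi);
        return 0;
}

Compiled with gcc or clang, this should print [0x4000000000000000, 0xfffffffffffffffe] for the all-overflow case and [0x0, 0xffffffffffffffff] for the partial-overflow case, matching the bounds the "64-bit addition" selftests below expect in the verifier log.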
2 parents 3ce7cdd + e1d7945 commit 3713b58

2 files changed: +217 −20 lines changed

kernel/bpf/verifier.c

Lines changed: 56 additions & 20 deletions
@@ -14605,14 +14605,25 @@ static void scalar32_min_max_add(struct bpf_reg_state *dst_reg,
 	s32 *dst_smax = &dst_reg->s32_max_value;
 	u32 *dst_umin = &dst_reg->u32_min_value;
 	u32 *dst_umax = &dst_reg->u32_max_value;
+	u32 umin_val = src_reg->u32_min_value;
+	u32 umax_val = src_reg->u32_max_value;
+	bool min_overflow, max_overflow;
 
 	if (check_add_overflow(*dst_smin, src_reg->s32_min_value, dst_smin) ||
 	    check_add_overflow(*dst_smax, src_reg->s32_max_value, dst_smax)) {
 		*dst_smin = S32_MIN;
 		*dst_smax = S32_MAX;
 	}
-	if (check_add_overflow(*dst_umin, src_reg->u32_min_value, dst_umin) ||
-	    check_add_overflow(*dst_umax, src_reg->u32_max_value, dst_umax)) {
+
+	/* If either all additions overflow or no additions overflow, then
+	 * it is okay to set: dst_umin = dst_umin + src_umin, dst_umax =
+	 * dst_umax + src_umax. Otherwise (some additions overflow), set
+	 * the output bounds to unbounded.
+	 */
+	min_overflow = check_add_overflow(*dst_umin, umin_val, dst_umin);
+	max_overflow = check_add_overflow(*dst_umax, umax_val, dst_umax);
+
+	if (!min_overflow && max_overflow) {
 		*dst_umin = 0;
 		*dst_umax = U32_MAX;
 	}
@@ -14625,14 +14636,25 @@ static void scalar_min_max_add(struct bpf_reg_state *dst_reg,
 	s64 *dst_smax = &dst_reg->smax_value;
 	u64 *dst_umin = &dst_reg->umin_value;
 	u64 *dst_umax = &dst_reg->umax_value;
+	u64 umin_val = src_reg->umin_value;
+	u64 umax_val = src_reg->umax_value;
+	bool min_overflow, max_overflow;
 
 	if (check_add_overflow(*dst_smin, src_reg->smin_value, dst_smin) ||
 	    check_add_overflow(*dst_smax, src_reg->smax_value, dst_smax)) {
 		*dst_smin = S64_MIN;
 		*dst_smax = S64_MAX;
 	}
-	if (check_add_overflow(*dst_umin, src_reg->umin_value, dst_umin) ||
-	    check_add_overflow(*dst_umax, src_reg->umax_value, dst_umax)) {
+
+	/* If either all additions overflow or no additions overflow, then
+	 * it is okay to set: dst_umin = dst_umin + src_umin, dst_umax =
+	 * dst_umax + src_umax. Otherwise (some additions overflow), set
+	 * the output bounds to unbounded.
+	 */
+	min_overflow = check_add_overflow(*dst_umin, umin_val, dst_umin);
+	max_overflow = check_add_overflow(*dst_umax, umax_val, dst_umax);
+
+	if (!min_overflow && max_overflow) {
 		*dst_umin = 0;
 		*dst_umax = U64_MAX;
 	}
@@ -14643,23 +14665,30 @@ static void scalar32_min_max_sub(struct bpf_reg_state *dst_reg,
 {
 	s32 *dst_smin = &dst_reg->s32_min_value;
 	s32 *dst_smax = &dst_reg->s32_max_value;
+	u32 *dst_umin = &dst_reg->u32_min_value;
+	u32 *dst_umax = &dst_reg->u32_max_value;
 	u32 umin_val = src_reg->u32_min_value;
 	u32 umax_val = src_reg->u32_max_value;
+	bool min_underflow, max_underflow;
 
 	if (check_sub_overflow(*dst_smin, src_reg->s32_max_value, dst_smin) ||
 	    check_sub_overflow(*dst_smax, src_reg->s32_min_value, dst_smax)) {
 		/* Overflow possible, we know nothing */
 		*dst_smin = S32_MIN;
 		*dst_smax = S32_MAX;
 	}
-	if (dst_reg->u32_min_value < umax_val) {
-		/* Overflow possible, we know nothing */
-		dst_reg->u32_min_value = 0;
-		dst_reg->u32_max_value = U32_MAX;
-	} else {
-		/* Cannot overflow (as long as bounds are consistent) */
-		dst_reg->u32_min_value -= umax_val;
-		dst_reg->u32_max_value -= umin_val;
+
+	/* If either all subtractions underflow or no subtractions
+	 * underflow, it is okay to set: dst_umin = dst_umin - src_umax,
+	 * dst_umax = dst_umax - src_umin. Otherwise (some subtractions
+	 * underflow), set the output bounds to unbounded.
+	 */
+	min_underflow = check_sub_overflow(*dst_umin, umax_val, dst_umin);
+	max_underflow = check_sub_overflow(*dst_umax, umin_val, dst_umax);
+
+	if (min_underflow && !max_underflow) {
+		*dst_umin = 0;
+		*dst_umax = U32_MAX;
 	}
 }
 
@@ -14668,23 +14697,30 @@ static void scalar_min_max_sub(struct bpf_reg_state *dst_reg,
 {
 	s64 *dst_smin = &dst_reg->smin_value;
 	s64 *dst_smax = &dst_reg->smax_value;
+	u64 *dst_umin = &dst_reg->umin_value;
+	u64 *dst_umax = &dst_reg->umax_value;
 	u64 umin_val = src_reg->umin_value;
 	u64 umax_val = src_reg->umax_value;
+	bool min_underflow, max_underflow;
 
 	if (check_sub_overflow(*dst_smin, src_reg->smax_value, dst_smin) ||
 	    check_sub_overflow(*dst_smax, src_reg->smin_value, dst_smax)) {
 		/* Overflow possible, we know nothing */
 		*dst_smin = S64_MIN;
 		*dst_smax = S64_MAX;
 	}
-	if (dst_reg->umin_value < umax_val) {
-		/* Overflow possible, we know nothing */
-		dst_reg->umin_value = 0;
-		dst_reg->umax_value = U64_MAX;
-	} else {
-		/* Cannot overflow (as long as bounds are consistent) */
-		dst_reg->umin_value -= umax_val;
-		dst_reg->umax_value -= umin_val;
+
+	/* If either all subtractions underflow or no subtractions
+	 * underflow, it is okay to set: dst_umin = dst_umin - src_umax,
+	 * dst_umax = dst_umax - src_umin. Otherwise (some subtractions
+	 * underflow), set the output bounds to unbounded.
+	 */
+	min_underflow = check_sub_overflow(*dst_umin, umax_val, dst_umin);
+	max_underflow = check_sub_overflow(*dst_umax, umin_val, dst_umax);
+
+	if (min_underflow && !max_underflow) {
+		*dst_umin = 0;
+		*dst_umax = U64_MAX;
 	}
 }
 
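The subtraction helpers are where the precision gain is easiest to see: the old code gave up whenever dst_umin < src_umax, while the new rule keeps the wrapped bounds whenever both boundary subtractions underflow. Below is an editor's user-space sketch of that rule (illustrative only, not the kernel code, with __builtin_sub_overflow() standing in for check_sub_overflow()).

/* Illustrative sketch only: mirrors the new unsigned-sub bounds rule
 * outside the kernel. Not the kernel code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void sub_u64_bounds(uint64_t dst_umin, uint64_t dst_umax,
                           uint64_t src_umin, uint64_t src_umax,
                           uint64_t *out_umin, uint64_t *out_umax)
{
        bool min_underflow = __builtin_sub_overflow(dst_umin, src_umax, out_umin);
        bool max_underflow = __builtin_sub_overflow(dst_umax, src_umin, out_umax);

        /* Bounds stay precise when both or neither boundary subtraction
         * wraps; only a partial underflow forces the unbounded range
         * [0, U64_MAX].
         */
        if (min_underflow && !max_underflow) {
                *out_umin = 0;
                *out_umax = UINT64_MAX;
        }
}

int main(void)
{
        uint64_t lo, hi;

        /* dst is exactly 0, src is in [0x8000000000000000, U64_MAX]:
         * both subtractions wrap, so the wrapped range is kept.
         */
        sub_u64_bounds(0, 0, 0x8000000000000000ULL, UINT64_MAX, &lo, &hi);
        printf("all underflow:     [0x%llx, 0x%llx]\n",
               (unsigned long long)lo, (unsigned long long)hi);

        /* dst in [0, U64_MAX], src is exactly 1: only the min
         * subtraction wraps, so the bounds become unbounded.
         */
        sub_u64_bounds(0, UINT64_MAX, 1, 1, &lo, &hi);
        printf("partial underflow: [0x%llx, 0x%llx]\n",
               (unsigned long long)lo, (unsigned long long)hi);
        return 0;
}

For dst = 0 and src in [0x8000000000000000, U64_MAX] this yields [0x1, 0x8000000000000000], the range the "64-bit subtraction, all outcomes underflow" selftest below expects, whereas the removed dst_umin < src_umax check would have produced [0, U64_MAX].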

tools/testing/selftests/bpf/progs/verifier_bounds.c

Lines changed: 161 additions & 0 deletions
@@ -1371,4 +1371,165 @@ __naked void mult_sign_ovf(void)
 	__imm(bpf_skb_store_bytes)
 	: __clobber_all);
 }
+
+SEC("socket")
+__description("64-bit addition, all outcomes overflow")
+__success __log_level(2)
+__msg("5: (0f) r3 += r3 {{.*}} R3_w=scalar(umin=0x4000000000000000,umax=0xfffffffffffffffe)")
+__retval(0)
+__naked void add64_full_overflow(void)
+{
+	asm volatile (
+	"call %[bpf_get_prandom_u32];"
+	"r4 = r0;"
+	"r3 = 0xa000000000000000 ll;"
+	"r3 |= r4;"
+	"r3 += r3;"
+	"r0 = 0;"
+	"exit"
+	:
+	: __imm(bpf_get_prandom_u32)
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("64-bit addition, partial overflow, result in unbounded reg")
+__success __log_level(2)
+__msg("4: (0f) r3 += r3 {{.*}} R3_w=scalar()")
+__retval(0)
+__naked void add64_partial_overflow(void)
+{
+	asm volatile (
+	"call %[bpf_get_prandom_u32];"
+	"r4 = r0;"
+	"r3 = 2;"
+	"r3 |= r4;"
+	"r3 += r3;"
+	"r0 = 0;"
+	"exit"
+	:
+	: __imm(bpf_get_prandom_u32)
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("32-bit addition overflow, all outcomes overflow")
+__success __log_level(2)
+__msg("4: (0c) w3 += w3 {{.*}} R3_w=scalar(smin=umin=umin32=0x40000000,smax=umax=umax32=0xfffffffe,var_off=(0x0; 0xffffffff))")
+__retval(0)
+__naked void add32_full_overflow(void)
+{
+	asm volatile (
+	"call %[bpf_get_prandom_u32];"
+	"w4 = w0;"
+	"w3 = 0xa0000000;"
+	"w3 |= w4;"
+	"w3 += w3;"
+	"r0 = 0;"
+	"exit"
+	:
+	: __imm(bpf_get_prandom_u32)
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("32-bit addition, partial overflow, result in unbounded u32 bounds")
+__success __log_level(2)
+__msg("4: (0c) w3 += w3 {{.*}} R3_w=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))")
+__retval(0)
+__naked void add32_partial_overflow(void)
+{
+	asm volatile (
+	"call %[bpf_get_prandom_u32];"
+	"w4 = w0;"
+	"w3 = 2;"
+	"w3 |= w4;"
+	"w3 += w3;"
+	"r0 = 0;"
+	"exit"
+	:
+	: __imm(bpf_get_prandom_u32)
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("64-bit subtraction, all outcomes underflow")
+__success __log_level(2)
+__msg("6: (1f) r3 -= r1 {{.*}} R3_w=scalar(umin=1,umax=0x8000000000000000)")
+__retval(0)
+__naked void sub64_full_overflow(void)
+{
+	asm volatile (
+	"call %[bpf_get_prandom_u32];"
+	"r1 = r0;"
+	"r2 = 0x8000000000000000 ll;"
+	"r1 |= r2;"
+	"r3 = 0;"
+	"r3 -= r1;"
+	"r0 = 0;"
+	"exit"
+	:
+	: __imm(bpf_get_prandom_u32)
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("64-bit subtration, partial overflow, result in unbounded reg")
+__success __log_level(2)
+__msg("3: (1f) r3 -= r2 {{.*}} R3_w=scalar()")
+__retval(0)
+__naked void sub64_partial_overflow(void)
+{
+	asm volatile (
+	"call %[bpf_get_prandom_u32];"
+	"r3 = r0;"
+	"r2 = 1;"
+	"r3 -= r2;"
+	"r0 = 0;"
+	"exit"
+	:
+	: __imm(bpf_get_prandom_u32)
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("32-bit subtraction overflow, all outcomes underflow")
+__success __log_level(2)
+__msg("5: (1c) w3 -= w1 {{.*}} R3_w=scalar(smin=umin=umin32=1,smax=umax=umax32=0x80000000,var_off=(0x0; 0xffffffff))")
+__retval(0)
+__naked void sub32_full_overflow(void)
+{
+	asm volatile (
+	"call %[bpf_get_prandom_u32];"
+	"w1 = w0;"
+	"w2 = 0x80000000;"
+	"w1 |= w2;"
+	"w3 = 0;"
+	"w3 -= w1;"
+	"r0 = 0;"
+	"exit"
+	:
+	: __imm(bpf_get_prandom_u32)
+	: __clobber_all);
+}
+
+SEC("socket")
+__description("32-bit subtration, partial overflow, result in unbounded u32 bounds")
+__success __log_level(2)
+__msg("3: (1c) w3 -= w2 {{.*}} R3_w=scalar(smin=0,smax=umax=0xffffffff,var_off=(0x0; 0xffffffff))")
+__retval(0)
+__naked void sub32_partial_overflow(void)
+{
+	asm volatile (
+	"call %[bpf_get_prandom_u32];"
+	"w3 = w0;"
+	"w2 = 1;"
+	"w3 -= w2;"
+	"r0 = 0;"
+	"exit"
+	:
+	: __imm(bpf_get_prandom_u32)
+	: __clobber_all);
+}
+
 char _license[] SEC("license") = "GPL";
