Commit abfca01
bpf: Factor out htab_elem_value helper()
JIRA: https://issues.redhat.com/browse/RHEL-110274

Conflicts: changed context due to missing upstream commits 4fa8d68
("bpf: Convert hashtab.c to rqspinlock") and ea5b229 ("bpf: Remove
migrate_{disable|enable} in ->map_for_each_callback").

commit ba2b31b
Author: Hou Tao <[email protected]>
Date:   Tue Apr 1 14:22:45 2025 +0800

    bpf: Factor out htab_elem_value helper()

    All hash maps store the map key and map value together. The offset of
    the map value relative to the map key is round_up(key_size, 8).
    Therefore, factor out a common helper, htab_elem_value(), to calculate
    the address of the map value instead of duplicating the logic.

    Acked-by: Andrii Nakryiko <[email protected]>
    Signed-off-by: Hou Tao <[email protected]>
    Link: https://lore.kernel.org/r/[email protected]
    Signed-off-by: Alexei Starovoitov <[email protected]>

Signed-off-by: Viktor Malik <[email protected]>
Parent: 684707c

kernel/bpf/hashtab.c: 30 additions, 34 deletions

--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -194,20 +194,25 @@ static bool htab_is_percpu(const struct bpf_htab *htab)
 	       htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
 }
 
+static inline void *htab_elem_value(struct htab_elem *l, u32 key_size)
+{
+	return l->key + round_up(key_size, 8);
+}
+
 static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
 				     void __percpu *pptr)
 {
-	*(void __percpu **)(l->key + roundup(key_size, 8)) = pptr;
+	*(void __percpu **)htab_elem_value(l, key_size) = pptr;
 }
 
 static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
 {
-	return *(void __percpu **)(l->key + roundup(key_size, 8));
+	return *(void __percpu **)htab_elem_value(l, key_size);
 }
 
 static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
 {
-	return *(void **)(l->key + roundup(map->key_size, 8));
+	return *(void **)htab_elem_value(l, map->key_size);
 }
 
 static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
@@ -234,10 +239,10 @@ static void htab_free_prealloced_timers_and_wq(struct bpf_htab *htab)
 		elem = get_htab_elem(htab, i);
 		if (btf_record_has_field(htab->map.record, BPF_TIMER))
 			bpf_obj_free_timer(htab->map.record,
-					   elem->key + round_up(htab->map.key_size, 8));
+					   htab_elem_value(elem, htab->map.key_size));
 		if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE))
 			bpf_obj_free_workqueue(htab->map.record,
-					       elem->key + round_up(htab->map.key_size, 8));
+					       htab_elem_value(elem, htab->map.key_size));
 		cond_resched();
 	}
 }
@@ -264,7 +269,8 @@ static void htab_free_prealloced_fields(struct bpf_htab *htab)
 				cond_resched();
 			}
 		} else {
-			bpf_obj_free_fields(htab->map.record, elem->key + round_up(htab->map.key_size, 8));
+			bpf_obj_free_fields(htab->map.record,
+					    htab_elem_value(elem, htab->map.key_size));
 			cond_resched();
 		}
 		cond_resched();
@@ -703,7 +709,7 @@ static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
 	struct htab_elem *l = __htab_map_lookup_elem(map, key);
 
 	if (l)
-		return l->key + round_up(map->key_size, 8);
+		return htab_elem_value(l, map->key_size);
 
 	return NULL;
 }
@@ -742,7 +748,7 @@ static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
 	if (l) {
 		if (mark)
 			bpf_lru_node_set_ref(&l->lru_node);
-		return l->key + round_up(map->key_size, 8);
+		return htab_elem_value(l, map->key_size);
 	}
 
 	return NULL;
@@ -793,7 +799,7 @@ static void check_and_free_fields(struct bpf_htab *htab,
 		for_each_possible_cpu(cpu)
 			bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
 	} else {
-		void *map_value = elem->key + round_up(htab->map.key_size, 8);
+		void *map_value = htab_elem_value(elem, htab->map.key_size);
 
 		bpf_obj_free_fields(htab->map.record, map_value);
 	}
@@ -1070,11 +1076,9 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 		htab_elem_set_ptr(l_new, key_size, pptr);
 	} else if (fd_htab_map_needs_adjust(htab)) {
 		size = round_up(size, 8);
-		memcpy(l_new->key + round_up(key_size, 8), value, size);
+		memcpy(htab_elem_value(l_new, key_size), value, size);
 	} else {
-		copy_map_value(&htab->map,
-			       l_new->key + round_up(key_size, 8),
-			       value);
+		copy_map_value(&htab->map, htab_elem_value(l_new, key_size), value);
 	}
 
 	l_new->hash = hash;
@@ -1137,7 +1141,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 	if (l_old) {
 		/* grab the element lock and update value in place */
 		copy_map_value_locked(map,
-				      l_old->key + round_up(key_size, 8),
+				      htab_elem_value(l_old, key_size),
 				      value, false);
 		return 0;
 	}
@@ -1165,7 +1169,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 		 * and update element in place
 		 */
 		copy_map_value_locked(map,
-				      l_old->key + round_up(key_size, 8),
+				      htab_elem_value(l_old, key_size),
 				      value, false);
 		ret = 0;
 		goto err;
@@ -1251,8 +1255,7 @@ static long htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value
 	l_new = prealloc_lru_pop(htab, key, hash);
 	if (!l_new)
 		return -ENOMEM;
-	copy_map_value(&htab->map,
-		       l_new->key + round_up(map->key_size, 8), value);
+	copy_map_value(&htab->map, htab_elem_value(l_new, map->key_size), value);
 
 	ret = htab_lock_bucket(htab, b, hash, &flags);
 	if (ret)
@@ -1533,10 +1536,10 @@ static void htab_free_malloced_timers_and_wq(struct bpf_htab *htab)
 			/* We only free timer on uref dropping to zero */
 			if (btf_record_has_field(htab->map.record, BPF_TIMER))
 				bpf_obj_free_timer(htab->map.record,
-						   l->key + round_up(htab->map.key_size, 8));
+						   htab_elem_value(l, htab->map.key_size));
 			if (btf_record_has_field(htab->map.record, BPF_WORKQUEUE))
 				bpf_obj_free_workqueue(htab->map.record,
-						       l->key + round_up(htab->map.key_size, 8));
+						       htab_elem_value(l, htab->map.key_size));
 		}
 		cond_resched_rcu();
 	}
@@ -1652,15 +1655,12 @@ static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
 			off += roundup_value_size;
 		}
 	} else {
-		u32 roundup_key_size = round_up(map->key_size, 8);
+		void *src = htab_elem_value(l, map->key_size);
 
 		if (flags & BPF_F_LOCK)
-			copy_map_value_locked(map, value, l->key +
-					      roundup_key_size,
-					      true);
+			copy_map_value_locked(map, value, src, true);
 		else
-			copy_map_value(map, value, l->key +
-				       roundup_key_size);
+			copy_map_value(map, value, src);
 		/* Zeroing special fields in the temp buffer */
 		check_and_init_map_value(map, value);
 	}
@@ -1715,12 +1715,12 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 				   bool is_percpu)
 {
 	struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
-	u32 bucket_cnt, total, key_size, value_size, roundup_key_size;
 	void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val;
 	void __user *uvalues = u64_to_user_ptr(attr->batch.values);
 	void __user *ukeys = u64_to_user_ptr(attr->batch.keys);
 	void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
 	u32 batch, max_count, size, bucket_size, map_id;
+	u32 bucket_cnt, total, key_size, value_size;
 	struct htab_elem *node_to_free = NULL;
 	u64 elem_map_flags, map_flags;
 	struct hlist_nulls_head *head;
@@ -1755,7 +1755,6 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 		return -ENOENT;
 
 	key_size = htab->map.key_size;
-	roundup_key_size = round_up(htab->map.key_size, 8);
 	value_size = htab->map.value_size;
 	size = round_up(value_size, 8);
 	if (is_percpu)
@@ -1847,7 +1846,7 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
 				off += size;
 			}
 		} else {
-			value = l->key + roundup_key_size;
+			value = htab_elem_value(l, key_size);
 			if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
 				struct bpf_map **inner_map = value;
 
@@ -2098,11 +2097,11 @@ static void *bpf_hash_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem)
 {
 	struct bpf_iter_seq_hash_map_info *info = seq->private;
-	u32 roundup_key_size, roundup_value_size;
 	struct bpf_iter__bpf_map_elem ctx = {};
 	struct bpf_map *map = info->map;
 	struct bpf_iter_meta meta;
 	int ret = 0, off = 0, cpu;
+	u32 roundup_value_size;
 	struct bpf_prog *prog;
 	void __percpu *pptr;
 
@@ -2112,10 +2111,9 @@ static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem)
 	ctx.meta = &meta;
 	ctx.map = info->map;
 	if (elem) {
-		roundup_key_size = round_up(map->key_size, 8);
 		ctx.key = elem->key;
 		if (!info->percpu_value_buf) {
-			ctx.value = elem->key + roundup_key_size;
+			ctx.value = htab_elem_value(elem, map->key_size);
 		} else {
 			roundup_value_size = round_up(map->value_size, 8);
 			pptr = htab_elem_get_ptr(elem, map->key_size);
@@ -2200,7 +2198,6 @@ static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_
 	struct hlist_nulls_head *head;
 	struct hlist_nulls_node *n;
 	struct htab_elem *elem;
-	u32 roundup_key_size;
 	int i, num_elems = 0;
 	void __percpu *pptr;
 	struct bucket *b;
@@ -2213,7 +2210,6 @@ static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_
 
 	is_percpu = htab_is_percpu(htab);
 
-	roundup_key_size = round_up(map->key_size, 8);
 	/* disable migration so percpu value prepared here will be the
 	 * same as the one seen by the bpf program with bpf_map_lookup_elem().
 	 */
@@ -2230,7 +2226,7 @@ static long bpf_for_each_hash_elem(struct bpf_map *map, bpf_callback_t callback_
 			pptr = htab_elem_get_ptr(elem, map->key_size);
 			val = this_cpu_ptr(pptr);
 		} else {
-			val = elem->key + roundup_key_size;
+			val = htab_elem_value(elem, map->key_size);
 		}
 		num_elems++;
 		ret = callback_fn((u64)(long)map, (u64)(long)key,
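
For illustration, the layout the new helper encodes can be reproduced in a minimal, self-contained user-space sketch: each element stores the key first, and the value begins at the next 8-byte-aligned offset past it. The trimmed struct htab_elem, the ROUND_UP macro, and main() below are demo scaffolding, not the kernel code; the kernel's real struct htab_elem carries additional bookkeeping fields.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* same arithmetic as the kernel's round_up(x, 8); valid for power-of-two alignments */
#define ROUND_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

struct htab_elem {
	uint32_t hash;	/* stand-in for the kernel's bookkeeping fields */
	char key[];	/* key bytes, then padding, then value bytes */
};

/* the factored-out helper: the value sits at key + round_up(key_size, 8) */
static inline void *htab_elem_value(struct htab_elem *l, uint32_t key_size)
{
	return l->key + ROUND_UP(key_size, 8);
}

int main(void)
{
	uint32_t key_size = 5, value_size = 4;	/* arbitrary demo sizes */
	struct htab_elem *l = calloc(1, sizeof(*l) +
				     ROUND_UP(key_size, 8) + value_size);
	uint32_t value = 42;

	memcpy(l->key, "hello", key_size);
	memcpy(htab_elem_value(l, key_size), &value, sizeof(value));

	/* the value starts at offset 8 from the key, not 5, due to the round-up */
	printf("value offset = %zu, value = %u\n",
	       (size_t)((char *)htab_elem_value(l, key_size) - l->key),
	       *(uint32_t *)htab_elem_value(l, key_size));
	free(l);
	return 0;
}

With key_size = 5 the value lands at offset 8 rather than 5; centralizing that round_up(key_size, 8) arithmetic in htab_elem_value() is what lets the diff above drop the open-coded expression at each call site.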
