@@ -34,11 +34,11 @@ const volatile u64 numa_cpumasks[MAX_NUMA_NODES][MAX_CPUS / 64];
 const volatile u32 llc_numa_id_map[MAX_LLCS];
 const volatile u32 cpu_llc_id_map[MAX_CPUS];
 const volatile u32 nr_layers = 1;
-const volatile u32 nr_containers = 1;
+const volatile u32 nr_cpusets = 1;
 const volatile u32 nr_nodes = 32; /* !0 for veristat, set during init */
 const volatile u32 nr_llcs = 32; /* !0 for veristat, set during init */
 const volatile bool smt_enabled = true;
-const volatile bool enable_container = true;
+const volatile bool enable_cpuset = true;
 const volatile bool has_little_cores = true;
 const volatile bool xnuma_preemption = false;
 const volatile s32 __sibling_cpu[MAX_CPUS];
@@ -54,7 +54,7 @@ const volatile u64 min_open_layer_disallow_preempt_after_ns;
 const volatile u64 lo_fb_wait_ns = 5000000; /* !0 for veristat */
 const volatile u32 lo_fb_share_ppk = 128; /* !0 for veristat */
 const volatile bool percpu_kthread_preempt = true;
-const volatile u64 cpuset_fakemasks[MAX_CONTAINERS][MAX_CPUS / 64];
+const volatile u64 cpuset_fakemasks[MAX_CPUSETS][MAX_CPUS / 64];

 /* Flag to enable or disable antistall feature */
 const volatile bool enable_antistall = true;
@@ -81,7 +81,7 @@ struct cpumask_box {

 struct {
         __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
-        __uint(max_entries, MAX_CONTAINERS);
+        __uint(max_entries, MAX_CPUSETS);
         __type(key, u32);
         __type(value, struct cpumask_box);
 } cpuset_cpumask SEC(".maps");
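
Context note, not part of the patch: the hunk above defines `cpuset_cpumask` as a per-CPU array indexed by cpuset id, where each `cpumask_box` entry holds a `bpf_cpumask` pointer. Below is a minimal sketch of how such an entry could be looked up and a task's affinity tested against it, mirroring the `bpf_map_lookup_elem()`/`cast_mask()` pattern used in the `refresh_cpus_flags()` hunk further down. The helper name `task_fits_cpuset()` and the subset test are illustrative assumptions; the snippet also assumes the `cpumask_box` struct, the map above, and the scx common BPF headers that provide `cast_mask()` and the cpumask kfuncs.

```c
/* Sketch only: mirrors the lookup pattern used later in this diff.
 * task_fits_cpuset() is a hypothetical helper, not part of the patch;
 * real code must additionally satisfy the verifier's kptr/RCU rules. */
static __always_inline bool task_fits_cpuset(struct task_struct *p, u32 cpuset_id)
{
        struct cpumask_box *box;

        if (cpuset_id >= MAX_CPUSETS)
                return false;

        /* Per-CPU array lookup: one cpumask_box per cpuset index. */
        box = bpf_map_lookup_elem(&cpuset_cpumask, &cpuset_id);
        if (!box || !box->mask)
                return false;

        /* Treat the task as "cpuset aligned" when its affinity mask is a
         * subset of the cpuset's cpumask (illustrative assumption). */
        return bpf_cpumask_subset(p->cpus_ptr, cast_mask(box->mask));
}
```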
@@ -1349,10 +1349,10 @@ void BPF_STRUCT_OPS(layered_enqueue, struct task_struct *p, u64 enq_flags)
          * without making the whole scheduler node aware and should only be used
          * with open layers on non-saturated machines to avoid possible stalls.
          */
-        if ((!taskc->all_cpus_allowed &&
-             !(layer->allow_node_aligned && taskc->cpus_node_aligned)) ||
-            !(enable_container && taskc->cpus_cpuset_aligned) ||
-            !layer->nr_cpus) {
+        if ((!taskc->all_cpus_allowed &&
+             !((layer->allow_node_aligned && taskc->cpus_node_aligned) ||
+               (enable_cpuset && taskc->cpus_cpuset_aligned)))
+            || !layer->nr_cpus) {

                 taskc->dsq_id = task_cpuc->lo_fb_dsq_id;
                 /*
@@ -2617,8 +2617,8 @@ static void refresh_cpus_flags(struct task_ctx *taskc,
                         break;
                 }
         }
-        if (enable_container) {
-                bpf_for(container_id, 0, nr_containers) {
+        if (enable_cpuset) {
+                bpf_for(container_id, 0, nr_cpusets) {
                         struct cpumask_box *box;
                         box = bpf_map_lookup_elem(&cpuset_cpumask, &container_id);
                         if (!box || !box->mask) {
@@ -3338,8 +3338,8 @@ s32 BPF_STRUCT_OPS_SLEEPABLE(layered_init)



-        if (enable_container) {
-                bpf_for(i, 0, nr_containers) {
+        if (enable_cpuset) {
+                bpf_for(i, 0, nr_cpusets) {
                         cpumask = bpf_cpumask_create();

                         if (!cpumask)
@@ -3348,7 +3348,7 @@ s32 BPF_STRUCT_OPS_SLEEPABLE(layered_init)

                         bpf_for(j, 0, MAX_CPUS/64) {
                                 // verifier
-                                if (i < 0 || i >= MAX_CONTAINERS || j < 0 || j >= (MAX_CPUS / 64)) {
+                                if (i < 0 || i >= MAX_CPUSETS || j < 0 || j >= (MAX_CPUS / 64)) {
                                         bpf_cpumask_release(cpumask);
                                         return -1;
                                 }
@@ -3370,7 +3370,7 @@ s32 BPF_STRUCT_OPS_SLEEPABLE(layered_init)
                                 if (tmp_cpuset_cpumask)
                                         bpf_cpumask_release(tmp_cpuset_cpumask);
                                 scx_bpf_error("cpumask is null");
-                                return -1;
+                                return -1;
                         }
                         bpf_cpumask_copy(tmp_cpuset_cpumask, cast_mask(cpumask));
