@@ -34,11 +34,11 @@ const volatile u64 numa_cpumasks[MAX_NUMA_NODES][MAX_CPUS / 64];
 const volatile u32 llc_numa_id_map[MAX_LLCS];
 const volatile u32 cpu_llc_id_map[MAX_CPUS];
 const volatile u32 nr_layers = 1;
-const volatile u32 nr_containers = 1;
+const volatile u32 nr_cpusets = 1;
 const volatile u32 nr_nodes = 32;	/* !0 for veristat, set during init */
 const volatile u32 nr_llcs = 32;	/* !0 for veristat, set during init */
 const volatile bool smt_enabled = true;
-const volatile bool enable_container = true;
+const volatile bool enable_cpuset = true;
 const volatile bool has_little_cores = true;
 const volatile bool xnuma_preemption = false;
 const volatile s32 __sibling_cpu[MAX_CPUS];
@@ -55,7 +55,7 @@ const volatile u64 lo_fb_wait_ns = 5000000; /* !0 for veristat */
 const volatile u32 lo_fb_share_ppk = 128;	/* !0 for veristat */
 const volatile bool percpu_kthread_preempt = true;
 volatile u64 layer_refresh_seq_avgruntime;
-const volatile u64 cpuset_fakemasks[MAX_CONTAINERS][MAX_CPUS / 64];
+const volatile u64 cpuset_fakemasks[MAX_CPUSETS][MAX_CPUS / 64];
 
 /* Flag to enable or disable antistall feature */
 const volatile bool enable_antistall = true;
@@ -82,7 +82,7 @@ struct cpumask_box {
 
 struct {
 	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
-	__uint(max_entries, MAX_CONTAINERS);
+	__uint(max_entries, MAX_CPUSETS);
 	__type(key, u32);
 	__type(value, struct cpumask_box);
 } cpuset_cpumask SEC(".maps");
@@ -1375,10 +1375,10 @@ void BPF_STRUCT_OPS(layered_enqueue, struct task_struct *p, u64 enq_flags)
 	 * without making the whole scheduler node aware and should only be used
 	 * with open layers on non-saturated machines to avoid possible stalls.
 	 */
-	if ((!taskc->all_cpus_allowed &&
-	     !(layer->allow_node_aligned && taskc->cpus_node_aligned)) ||
-	    !(enable_container && taskc->cpus_cpuset_aligned) ||
-	    !layer->nr_cpus) {
+	if ((!taskc->all_cpus_allowed &&
+	     !((layer->allow_node_aligned && taskc->cpus_node_aligned) ||
+	       (enable_cpuset && taskc->cpus_cpuset_aligned)))
+	    || !layer->nr_cpus) {
 
 		taskc->dsq_id = task_cpuc->lo_fb_dsq_id;
 		/*
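
For clarity, the corrected condition routes a task to the low-priority fallback DSQ only when its layer currently owns no CPUs, or when the task's cpumask is restricted and that restriction is neither node-aligned (with the layer opting in via allow_node_aligned) nor aligned to a configured cpuset (with enable_cpuset set). In the old form, the cpuset term sat at the top level of the OR, so whenever cpusets were disabled or a task was not cpuset-aligned it took the fallback path regardless of the other checks. Below is a minimal standalone sketch of the same predicate; the helper name and plain bool/int parameters are illustrative stand-ins for the task, layer, and global state, not lines from this commit.

#include <stdbool.h>

/* Illustrative only -- mirrors the corrected check in layered_enqueue(). */
static bool use_lo_fallback(bool all_cpus_allowed, bool allow_node_aligned,
			    bool cpus_node_aligned, bool enable_cpuset,
			    bool cpus_cpuset_aligned, unsigned int layer_nr_cpus)
{
	/* A restricted cpumask is tolerated if it lines up with a NUMA node
	 * (and the layer allows that) or with one of the configured cpusets. */
	bool aligned = (allow_node_aligned && cpus_node_aligned) ||
		       (enable_cpuset && cpus_cpuset_aligned);

	return (!all_cpus_allowed && !aligned) || !layer_nr_cpus;
}
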
@@ -2665,8 +2665,8 @@ static void refresh_cpus_flags(struct task_ctx *taskc,
 			break;
 		}
 	}
-	if (enable_container) {
-		bpf_for(container_id, 0, nr_containers) {
+	if (enable_cpuset) {
+		bpf_for(container_id, 0, nr_cpusets) {
 			struct cpumask_box *box;
 			box = bpf_map_lookup_elem(&cpuset_cpumask, &container_id);
 			if (!box || !box->mask) {
@@ -3394,8 +3394,8 @@ s32 BPF_STRUCT_OPS_SLEEPABLE(layered_init)
 
 
 
-	if (enable_container) {
-		bpf_for(i, 0, nr_containers) {
+	if (enable_cpuset) {
+		bpf_for(i, 0, nr_cpusets) {
 			cpumask = bpf_cpumask_create();
 
 			if (!cpumask)
@@ -3404,7 +3404,7 @@ s32 BPF_STRUCT_OPS_SLEEPABLE(layered_init)
 
 			bpf_for(j, 0, MAX_CPUS/64) {
 				// verifier
-				if (i < 0 || i >= MAX_CONTAINERS || j < 0 || j >= (MAX_CPUS / 64)) {
+				if (i < 0 || i >= MAX_CPUSETS || j < 0 || j >= (MAX_CPUS / 64)) {
 					bpf_cpumask_release(cpumask);
 					return -1;
 				}
@@ -3426,7 +3426,7 @@ s32 BPF_STRUCT_OPS_SLEEPABLE(layered_init)
 				if (tmp_cpuset_cpumask)
 					bpf_cpumask_release(tmp_cpuset_cpumask);
 				scx_bpf_error("cpumask is null");
-					return -1;
+				return -1;
 			}
 			bpf_cpumask_copy(tmp_cpuset_cpumask, cast_mask(cpumask));
3432
0 commit comments