@@ -52,6 +52,7 @@ const volatile u64 min_open_layer_disallow_preempt_after_ns;
const volatile u64 lo_fb_wait_ns = 5000000;	/* !0 for veristat */
const volatile u32 lo_fb_share_ppk = 128;	/* !0 for veristat */
const volatile bool percpu_kthread_preempt = true;
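+/* number of layers with sticky_mod prediction enabled, counted in init_layer() */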
+int active_sticky_mod = 0;

/* Flag to enable or disable antistall feature */
const volatile bool enable_antistall = true;
@@ -499,6 +500,11 @@ struct task_ctx {
	u32			qrt_llc_id;

	char			join_layer[SCXCMD_COMLEN];
+
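+	/*
+	 * Histogram of this task's on-CPU run durations in 50ms buckets,
+	 * maintained by the sched_switch tracepoint below and used to
+	 * predict when its current run is likely to end.
+	 */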
+#define STICKY_MOD_NR_BUCKETS 8
+	u64			sticky_mod_buckets[STICKY_MOD_NR_BUCKETS];
+	u64			sticky_mod_nr_cnt;
+	u64			sticky_mod_start_ns;
};

struct {
@@ -871,6 +877,48 @@ s32 pick_idle_big_little(struct layer *layer, struct task_ctx *taskc,
	return cpu;
}

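+/*
+ * Pick a CPU for @layer based on sticky_mod predictions: prefer @prev_cpu if
+ * its prediction confidence meets the layer's threshold and its current task
+ * is predicted to yield within layer->sticky_mod_min_ns, otherwise scan @llc
+ * for such a CPU. Returns -1 if no suitable CPU is found.
+ */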
+static __always_inline
+s32 pick_sticky_mod_cpu(struct llc_ctx *llc, struct layer *layer, s32 prev_cpu)
+{
+	u64 time = bpf_ktime_get_ns();
+	const struct cpumask *cpumask;
+	struct cpu_ctx *cpu_ctx;
+	s32 cpu = -1;
+	int i;
+
+	if (!active_sticky_mod)
+		return -1;
+	bpf_printk("active!");
+
+	cpu_ctx = lookup_cpu_ctx(prev_cpu);
+	if (!cpu_ctx)
+		goto llc;
+	if (cpu_ctx->sticky_mod_pred_pct < layer->sticky_mod_pred_pct)
+		goto llc;
+	if (cpu_ctx->sticky_mod_end_time_ns - time > layer->sticky_mod_min_ns)
+		goto llc;
+	return prev_cpu;
+llc:
+	if (!(cpumask = cast_mask(llc->cpumask)))
+		goto out;
+	bpf_for(i, 0, nr_possible_cpus) {
+		if (i == prev_cpu)
+			continue;
+		if (!bpf_cpumask_test_cpu(i, cpumask))
+			continue;
+		if (!(cpu_ctx = lookup_cpu_ctx(i)))
+			continue;
+		if (cpu_ctx->sticky_mod_pred_pct < layer->sticky_mod_pred_pct)
+			continue;
+		if (cpu_ctx->sticky_mod_end_time_ns - time > layer->sticky_mod_min_ns)
+			continue;
+		cpu = i;
+		break;
+	}
+out:
+	return cpu;
+}
+

static __always_inline
s32 pick_idle_cpu(struct task_struct *p, s32 prev_cpu,
		  struct cpu_ctx *cpuc, struct task_ctx *taskc, struct layer *layer,
@@ -987,6 +1035,9 @@ s32 pick_idle_cpu(struct task_struct *p, s32 prev_cpu,
			cpu = -1;
			goto out_put;
		}
+
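+		/* pick a prev-LLC CPU predicted to free up soon, if any */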
+		if ((cpu = pick_sticky_mod_cpu(prev_llcc, layer, prev_cpu)) >= 0)
+			goto out_put;
	}

	/*
@@ -1195,6 +1246,55 @@ static void layer_kick_idle_cpu(struct layer *layer)
	scx_bpf_put_idle_cpumask(idle_smtmask);
}

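+/*
+ * On every context switch, record how long @prev ran into its 50ms-bucket
+ * histogram and store on the local CPU a prediction of when the current run
+ * will end and how dominant that bucket is, for pick_sticky_mod_cpu() to
+ * consult. @next's run start timestamp is recorded as well.
+ */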
+SEC("tp_btf/sched_switch")
+int BPF_PROG(layered_sched_switch, bool ignore, struct task_struct *prev, struct task_struct *next)
+{
+	u64 time = bpf_ktime_get_ns();
+	u64 duration = time, max = 0;
+	u32 beg = 0, end = 50000, i;
+	struct task_ctx *pc, *nc;
+	struct cpu_ctx *c;
+	u32 max_i = 0;
+
+	if (!active_sticky_mod)
+		return 0;
+
+	if (!(pc = lookup_task_ctx_may_fail(prev)))
+		goto next;
+
+	duration -= pc->sticky_mod_start_ns;
+	duration /= 1000;
+
+	pc->sticky_mod_nr_cnt++;
+
+	for (i = 0; i < STICKY_MOD_NR_BUCKETS; i++) {
+		u64 cnt = pc->sticky_mod_buckets[i];
+
+		if (duration >= beg && duration <= end) {
+			pc->sticky_mod_buckets[i]++;
+			cnt++;
+		}
+		if (max < cnt) {
+			max = cnt;
+			max_i = i;
+		}
+		beg += 50000;
+		end += 50000;
+		if (i == STICKY_MOD_NR_BUCKETS - 2)
+			end = -1;
+	}
+
+	if (!(c = lookup_cpu_ctx(-1)))
+		goto next;
+	/* convert the dominant bucket's upper bound into an absolute ns deadline */
+	c->sticky_mod_end_time_ns = time + (max_i + 1) * 50000ULL * 1000;
+	/* fraction of prev's runs that landed in the dominant bucket, in percent */
+	c->sticky_mod_pred_pct = max * 100 / pc->sticky_mod_nr_cnt;
+next:
+	if (!(nc = lookup_task_ctx_may_fail(next)))
+		return 0;
+	nc->sticky_mod_start_ns = time;
+	return 0;
+}
+

void BPF_STRUCT_OPS(layered_enqueue, struct task_struct *p, u64 enq_flags)
{
	struct cpu_ctx *cpuc, *task_cpuc;
@@ -1718,6 +1818,9 @@ static __always_inline bool try_consume_layer(u32 layer_id, struct cpu_ctx *cpuc
				xllc_mig_skipped = true;
				continue;
			}
+
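+			/* skip LLCs that still have a CPU predicted to free up soon */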
+			if (pick_sticky_mod_cpu(remote_llcc, layer, -1) >= 0)
+				continue;
		}

		if (scx_bpf_dsq_move_to_local(layer_dsq_id(layer_id, *llc_idp)))
@@ -3174,6 +3277,9 @@ static s32 init_layer(int layer_id)
		return ret;
	}

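+	/* any layer opting into sticky_mod turns on the sched_switch tracking */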
+	if (layer->sticky_mod_min_ns || layer->sticky_mod_pred_pct)
+		active_sticky_mod++;
+
	return 0;
}