@@ -629,6 +629,14 @@ static BaseType_t prvCreateIdleTasks( void );
     static void prvCheckForRunStateChange( void );
 #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
 
+#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
+/*
+ * Checks to see if another task moved the current task out of the ready
+ * list while it was waiting to enter a lightweight critical section and yields, if so.
+ */
+    static void prvLightWeightCheckForRunStateChange( void );
+#endif /* #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) */
+
 #if ( configNUMBER_OF_CORES > 1 )
 
 /*
@@ -960,6 +968,68 @@ static void prvAddNewTaskToReadyList( TCB_t * pxNewTCB ) PRIVILEGED_FUNCTION;
 
 /*-----------------------------------------------------------*/
 
+#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
+    static void prvLightWeightCheckForRunStateChange( void )
+    {
+        const TCB_t * pxThisTCB;
+        BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
+
+        /* This must only be called from within a task. */
+        portASSERT_IF_IN_ISR();
+
+        /* This function is always called with interrupts disabled
+         * so this is safe. */
+        pxThisTCB = pxCurrentTCBs[ xCoreID ];
+
+        while( pxThisTCB->xTaskRunState == taskTASK_SCHEDULED_TO_YIELD )
+        {
+            UBaseType_t uxPrevCriticalNesting;
+
+            /* We are only here if we just entered a critical section
+             * or if we just suspended the scheduler, and another task
+             * has requested that we yield.
+             *
+             * This is slightly complicated since we need to save and restore
+             * the suspension and critical nesting counts, as well as release
+             * and reacquire the correct locks. And then, do it all over again
+             * if our state changed again during the reacquisition. */
+            uxPrevCriticalNesting = portGET_CRITICAL_NESTING_COUNT( xCoreID );
+
+            if( uxPrevCriticalNesting > 0U )
+            {
+                portSET_CRITICAL_NESTING_COUNT( xCoreID, 0U );
+                kernelRELEASE_ISR_LOCK( xCoreID );
+            }
+            else
+            {
+                /* The scheduler is suspended. uxSchedulerSuspended is updated
+                 * only when the task is not requested to yield. */
+                mtCOVERAGE_TEST_MARKER();
+            }
+
+            portMEMORY_BARRIER();
+
+            portENABLE_INTERRUPTS();
+
+            /* Enabling interrupts should cause this core to immediately service
+             * the pending interrupt and yield. After servicing the pending interrupt,
+             * the task needs to re-evaluate its run state within this loop, as
+             * other cores may have requested this task to yield, potentially altering
+             * its run state. */
+
+            portDISABLE_INTERRUPTS();
+
+            xCoreID = ( BaseType_t ) portGET_CORE_ID();
+            kernelGET_ISR_LOCK( xCoreID );
+
+            portSET_CRITICAL_NESTING_COUNT( xCoreID, uxPrevCriticalNesting );
+        }
+    }
+#endif /* #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) */
+
+/*-----------------------------------------------------------*/
+
 #if ( configNUMBER_OF_CORES > 1 )
     static void prvYieldForTask( const TCB_t * pxTCB )
     {
@@ -2314,7 +2384,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
             }
             else
             {
-                mtCOVERAGE_TEST_MARKER();
+                /* Reset the deferred deletion state change flag. */
+                pxTCB->uxDeferredStateChange &= ~tskDEFERRED_DELETION;
             }
 #endif /* configUSE_TASK_PREEMPTION_DISABLE */
 
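Note that both this hunk and the matching one in vTaskSuspend() further below clear a single bit with `&= ~`, so the deferred state changes are tracked as a bitmask in `uxDeferredStateChange`. A compilable sketch of that convention follows; the flag values are illustrative only (the real `tskDEFERRED_DELETION` and `tskDEFERRED_SUSPENSION` definitions live in tasks.c and may differ):

    #include <stdint.h>

    typedef uint32_t UBaseType_t; /* Stand-in for the FreeRTOS type. */

    /* Illustrative values only; not the kernel's definitions. */
    #define tskDEFERRED_DELETION      ( ( UBaseType_t ) 1U << 0U )
    #define tskDEFERRED_SUSPENSION    ( ( UBaseType_t ) 1U << 1U )

    static UBaseType_t uxDemoDeferredStateChange = 0U;

    /* Record a deferred suspension without disturbing other pending bits. */
    static void vDemoRecordDeferredSuspension( void )
    {
        uxDemoDeferredStateChange |= tskDEFERRED_SUSPENSION;
    }

    /* Clear only the suspension bit once the suspension has been actioned,
     * exactly as the hunk above does for the deletion bit. */
    static void vDemoClearDeferredSuspension( void )
    {
        uxDemoDeferredStateChange &= ~tskDEFERRED_SUSPENSION;
    }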
@@ -3199,7 +3270,11 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
 
         traceENTER_vTaskPreemptionDisable( xTask );
 
-        kernelENTER_CRITICAL();
+        #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
+            vKernelLightWeightEnterCritical();
+        #else
+            kernelENTER_CRITICAL();
+        #endif
         {
             if( xSchedulerRunning != pdFALSE )
             {
@@ -3213,7 +3288,11 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
                 mtCOVERAGE_TEST_MARKER();
             }
         }
-        kernelEXIT_CRITICAL();
+        #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
+            vKernelLightWeightExitCritical();
+        #else
+            kernelEXIT_CRITICAL();
+        #endif
 
         traceRETURN_vTaskPreemptionDisable();
     }
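The same #if/#else pair recurs at all four enter/exit sites touched by this commit. A single mapping macro could keep the call sites unchanged; a sketch with hypothetical names, not part of the patch:

    #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
        #define kernelENTER_CRITICAL_PREEMPTION()    vKernelLightWeightEnterCritical()
        #define kernelEXIT_CRITICAL_PREEMPTION()     vKernelLightWeightExitCritical()
    #else
        #define kernelENTER_CRITICAL_PREEMPTION()    kernelENTER_CRITICAL()
        #define kernelEXIT_CRITICAL_PREEMPTION()     kernelEXIT_CRITICAL()
    #endif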
@@ -3226,10 +3305,15 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
     void vTaskPreemptionEnable( const TaskHandle_t xTask )
     {
         TCB_t * pxTCB;
+        UBaseType_t uxDeferredAction = 0U;
 
         traceENTER_vTaskPreemptionEnable( xTask );
 
-        kernelENTER_CRITICAL();
+        #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
+            vKernelLightWeightEnterCritical();
+        #else
+            kernelENTER_CRITICAL();
+        #endif
         {
             if( xSchedulerRunning != pdFALSE )
             {
@@ -3245,20 +3329,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
                  * preemption was disabled. */
                 if( pxTCB->uxDeferredStateChange != 0U )
                 {
-                    if( pxTCB->uxDeferredStateChange & tskDEFERRED_DELETION )
-                    {
-                        vTaskDelete( xTask );
-                    }
-                    else if( pxTCB->uxDeferredStateChange & tskDEFERRED_SUSPENSION )
-                    {
-                        vTaskSuspend( xTask );
-                    }
-                    else
-                    {
-                        mtCOVERAGE_TEST_MARKER();
-                    }
-
-                    pxTCB->uxDeferredStateChange = 0U;
+                    /* Capture the deferred action to perform outside the critical section. */
+                    uxDeferredAction = pxTCB->uxDeferredStateChange;
                 }
                 else
                 {
@@ -3282,7 +3354,28 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
                 mtCOVERAGE_TEST_MARKER();
             }
         }
-        kernelEXIT_CRITICAL();
+        #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
+            vKernelLightWeightExitCritical();
+        #else
+            kernelEXIT_CRITICAL();
+        #endif
+
+        /* Handle any deferred action outside the critical section. */
+        if( uxDeferredAction != 0U )
+        {
+            if( uxDeferredAction & tskDEFERRED_DELETION )
+            {
+                vTaskDelete( xTask );
+            }
+            else if( uxDeferredAction & tskDEFERRED_SUSPENSION )
+            {
+                vTaskSuspend( xTask );
+            }
+            else
+            {
+                mtCOVERAGE_TEST_MARKER();
+            }
+        }
 
         traceRETURN_vTaskPreemptionEnable();
     }
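Taken together with the capture hunk above, the new flow is: record the deferred request while inside the (lightweight) critical section, then perform vTaskDelete()/vTaskSuspend() only after it has been exited. A minimal application-side sketch of the behaviour this preserves; `vDoUninterruptedWork` is a hypothetical example function, and passing `NULL` targets the calling task:

    #include "FreeRTOS.h"
    #include "task.h"

    void vDoUninterruptedWork( void )
    {
        vTaskPreemptionDisable( NULL );
        {
            /* Work that must not be preempted on this core. A vTaskSuspend()
             * or vTaskDelete() aimed at this task while preemption is disabled
             * is only recorded in uxDeferredStateChange, not acted on. */
        }
        vTaskPreemptionEnable( NULL );

        /* The deferred suspend or delete, if any, is actioned inside
         * vTaskPreemptionEnable() after the kernel critical section has
         * been exited, per the hunk above. */
    }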
@@ -3320,7 +3413,8 @@ static void prvInitialiseNewTask( TaskFunction_t pxTaskCode,
         }
         else
         {
-            mtCOVERAGE_TEST_MARKER();
+            /* Reset the deferred suspension state change flag. */
+            pxTCB->uxDeferredStateChange &= ~tskDEFERRED_SUSPENSION;
         }
 #endif /* configUSE_TASK_PREEMPTION_DISABLE */
 
@@ -7741,6 +7835,78 @@ static void prvResetNextTaskUnblockTime( void )
 #endif /* #if ( configNUMBER_OF_CORES > 1 ) */
 /*-----------------------------------------------------------*/
 
+#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
+
+    void vKernelLightWeightEnterCritical( void )
+    {
+        if( xSchedulerRunning != pdFALSE )
+        {
+            portDISABLE_INTERRUPTS();
+            {
+                const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
+
+                /* Get only the ISR lock, not the task lock. */
+                kernelGET_ISR_LOCK( xCoreID );
+
+                portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID );
+
+                if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 1U
+                    #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+                        /* Check for a run state change of the task only if a
+                         * deferred state change is not pending. */
+                        && ( pxCurrentTCB->uxDeferredStateChange == 0U )
+                    #endif /* ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
+                    )
+                {
+                    prvLightWeightCheckForRunStateChange();
+                }
+            }
+        }
+    }
+
+#endif /* #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) */
+/*-----------------------------------------------------------*/
+
+#if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 )
+
+    void vKernelLightWeightExitCritical( void )
+    {
+        if( xSchedulerRunning != pdFALSE )
+        {
+            const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID();
+
+            if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U )
+            {
+                BaseType_t xYieldCurrentTask;
+
+                /* Release the ISR lock. */
+                kernelRELEASE_ISR_LOCK( xCoreID );
+
+                portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID );
+
+                xYieldCurrentTask = xTaskUnlockCanYield();
+
+                /* If the critical nesting count is now 0, enable interrupts. */
+                if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0U )
+                {
+                    portENABLE_INTERRUPTS();
+
+                    if( xYieldCurrentTask != pdFALSE
+                        #if ( configUSE_TASK_PREEMPTION_DISABLE == 1 )
+                            /* Yield only if no deferred state change is pending. */
+                            && ( pxCurrentTCB->uxDeferredStateChange == 0U )
+                        #endif /* ( configUSE_TASK_PREEMPTION_DISABLE == 1 ) */
+                        )
+                    {
+                        portYIELD();
+                    }
+                }
+            }
+        }
+    }
+
+#endif /* #if ( configLIGHTWEIGHT_CRITICAL_SECTION == 1 ) */
+/*-----------------------------------------------------------*/
+
 
 #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
 
     BaseType_t xTaskUnlockCanYield( void )
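How the new pair composes, condensed into one sketch. Assumptions: vKernelLightWeightEnterCritical() masks interrupts on the calling core and takes only the ISR lock, so it excludes ISRs and other cores' critical sections but, unlike kernelENTER_CRITICAL(), not holders of the task lock alone; `vDemoKernelOp` is a hypothetical stand-in for a short kernel operation such as the preemption-count updates above, written as if at tasks.c scope:

    /* Sketch only; not part of the patch. */
    static void vDemoKernelOp( void )
    {
        /* Enter: disable interrupts, take the ISR lock, bump the per-core
         * nesting count, and first drain any pending yield request via
         * prvLightWeightCheckForRunStateChange(). */
        vKernelLightWeightEnterCritical();
        {
            /* Short work on state that ISRs may also touch. */
        }
        /* Exit: release the ISR lock, drop the nesting count, re-enable
         * interrupts at depth zero, and yield if one became pending. */
        vKernelLightWeightExitCritical();
    }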