
Commit cc09d63

fix(freertos-smp): Miscellaneous fixes for granular locks
1 parent: e9310aa

7 files changed (+291 additions, -348 deletions)

event_groups.c

Lines changed: 16 additions & 62 deletions
@@ -87,24 +87,6 @@
     #define event_groupsEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus, pxEventBits )    taskEXIT_CRITICAL_FROM_ISR( uxSavedInterruptStatus );
 #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
 
-/*
- * Locks an event group for tasks. Prevents other tasks from accessing the event group but allows
- * ISRs to pend access to the event group. Caller cannot be preempted by other tasks
- * after locking the event group, thus allowing the caller to execute non-deterministic
- * operations.
- */
-#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
-    static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
-#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
-
-/*
- * Unlocks an event group for tasks. Handles all pended access from ISRs, then reenables
- * preemption for the caller.
- */
-#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
-    static BaseType_t prvUnlockEventGroupForTasks( EventGroup_t * pxEventBits ) PRIVILEGED_FUNCTION;
-#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
-
 /*
  * Test the bits set in uxCurrentEventBits to see if the wait condition is met.
  * The wait condition is defined by xWaitForAllBits. If xWaitForAllBits is
@@ -129,8 +111,22 @@
  * When the task unlocks the event group, all pended access attempts are handled.
  */
 #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
-    #define event_groupsLOCK( pxEventBits )      prvLockEventGroupForTasks( pxEventBits )
-    #define event_groupsUNLOCK( pxEventBits )    prvUnlockEventGroupForTasks( pxEventBits );
+    #define event_groupsLOCK( pxEventBits )    taskDATA_GROUP_LOCK( &( ( pxEventBits )->xTaskSpinlock ) )
+    #define event_groupsUNLOCK( pxEventBits )                              \
+    ( {                                                                    \
+        taskDATA_GROUP_UNLOCK( &( ( pxEventBits )->xTaskSpinlock ) );      \
+        BaseType_t xAlreadyYielded;                                        \
+        if( xTaskUnlockCanYield() == pdTRUE )                              \
+        {                                                                  \
+            taskYIELD_WITHIN_API();                                        \
+            xAlreadyYielded = pdTRUE;                                      \
+        }                                                                  \
+        else                                                               \
+        {                                                                  \
+            xAlreadyYielded = pdFALSE;                                     \
+        }                                                                  \
+        xAlreadyYielded;                                                   \
+    } )
 #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
     #define event_groupsLOCK( pxEventBits )      vTaskSuspendAll()
     #define event_groupsUNLOCK( pxEventBits )    xTaskResumeAll()
@@ -867,48 +863,6 @@
     traceRETURN_vEventGroupClearBitsCallback();
 }
 /*-----------------------------------------------------------*/
-#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
-    static void prvLockEventGroupForTasks( EventGroup_t * pxEventBits )
-    {
-        /* Disable preemption so that the current task cannot be preempted by another task */
-        vTaskPreemptionDisable( NULL );
-
-        /* Keep holding xTaskSpinlock to prevent tasks on other cores from accessing
-         * the event group while it is suspended. */
-        portGET_SPINLOCK( portGET_CORE_ID(), &( pxEventBits->xTaskSpinlock ) );
-    }
-#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
-/*-----------------------------------------------------------*/
-
-#if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
-    static BaseType_t prvUnlockEventGroupForTasks( EventGroup_t * pxEventBits )
-    {
-        BaseType_t xReturn = pdFALSE;
-
-        /* Release the previously held task spinlock */
-        portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxEventBits->xTaskSpinlock ) );
-
-        /* Re-enable preemption */
-        vTaskPreemptionEnable( NULL );
-
-        /* Yield if preemption was re-enabled */
-        if( xTaskUnlockCanYield() == pdTRUE )
-        {
-            taskYIELD_WITHIN_API();
-
-            /* Return true as the task was preempted */
-            xReturn = pdTRUE;
-        }
-        else
-        {
-            /* Return false as the task was not preempted */
-            xReturn = pdFALSE;
-        }
-
-        return xReturn;
-    }
-#endif /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
-/*-----------------------------------------------------------*/
 
 static BaseType_t prvTestWaitCondition( const EventBits_t uxCurrentEventBits,
                                         const EventBits_t uxBitsToWaitFor,
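
For orientation, here is a minimal sketch (not part of the commit) of the call pattern the reworked macros support inside an event-group API. The function name and the bit update below are illustrative; the real event-group APIs do considerably more work between lock and unlock:

/* Illustrative sketch only: the lock/unlock pattern used by event-group APIs
 * after this commit. prvExampleSetBits is a hypothetical kernel-internal
 * helper, not code from the commit. */
static EventBits_t prvExampleSetBits( EventGroup_t * pxEventBits,
                                      const EventBits_t uxBitsToSet )
{
    EventBits_t uxReturnBits;
    BaseType_t xAlreadyYielded;

    /* Takes the event group's task spinlock with preemption disabled;
     * ISR accesses are pended rather than blocked. */
    event_groupsLOCK( pxEventBits );
    {
        /* Non-deterministic work (e.g. walking the waiting-task list) is
         * safe here because no other task can access the event group. */
        pxEventBits->uxEventBits |= uxBitsToSet;
        uxReturnBits = pxEventBits->uxEventBits;
    }
    /* Releases the lock, re-enables preemption, and yields when
     * xTaskUnlockCanYield() says it should; evaluates to pdTRUE if a
     * yield occurred. */
    xAlreadyYielded = event_groupsUNLOCK( pxEventBits );
    ( void ) xAlreadyYielded;

    return uxReturnBits;
}

Note the design choice: event_groupsUNLOCK() is now a GCC statement expression so that, like xTaskResumeAll() in the single-core build, it evaluates to a BaseType_t telling the caller whether a yield already happened.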

include/FreeRTOS.h

Lines changed: 2 additions & 2 deletions
@@ -2972,8 +2972,8 @@
  * portTICK_TYPE_IS_ATOMIC was not set - map the critical sections used when
  * the tick count is returned to the standard critical section macros. */
 #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
-    #define portTICK_TYPE_ENTER_CRITICAL()    portENTER_CRITICAL_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock )
-    #define portTICK_TYPE_EXIT_CRITICAL()     portEXIT_CRITICAL_DATA_GROUP( &xTaskSpinlock, &xISRSpinlock )
+    #define portTICK_TYPE_ENTER_CRITICAL()    kernelENTER_CRITICAL()
+    #define portTICK_TYPE_EXIT_CRITICAL()     kernelEXIT_CRITICAL()
 #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
     #define portTICK_TYPE_ENTER_CRITICAL()    portENTER_CRITICAL()
     #define portTICK_TYPE_EXIT_CRITICAL()     portEXIT_CRITICAL()
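
The consumer of these macros is the tick-count read path. A sketch of that pattern, assuming the usual non-atomic case in tasks.c (the function name is illustrative; the kernel's real reader is xTaskGetTickCount(), and xTickCount is private to tasks.c):

/* Sketch of how portTICK_TYPE_ENTER/EXIT_CRITICAL are consumed when
 * portTICK_TYPE_IS_ATOMIC is 0. Simplified; xExampleGetTickCount is
 * hypothetical and assumed to live in tasks.c. */
TickType_t xExampleGetTickCount( void )
{
    TickType_t xTicks;

    /* In granular-locks SMP builds this now expands to kernelENTER_CRITICAL(),
     * i.e. the kernel data group's own critical section, instead of naming
     * the kernel spinlocks directly from FreeRTOS.h. */
    portTICK_TYPE_ENTER_CRITICAL();
    {
        xTicks = xTickCount;
    }
    portTICK_TYPE_EXIT_CRITICAL();

    return xTicks;
}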

include/task.h

Lines changed: 69 additions & 29 deletions
@@ -299,11 +299,11 @@ typedef enum
         { \
             const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
             /* Task spinlock is always taken first */ \
-            portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxTaskSpinlock ); \
+            portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \
             /* Disable interrupts */ \
             portDISABLE_INTERRUPTS(); \
             /* Take the ISR spinlock next */ \
-            portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxISRSpinlock ); \
+            portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxISRSpinlock ) ); \
             /* Increment the critical nesting count */ \
             portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
         } \
@@ -322,11 +322,13 @@ typedef enum
     #define taskDATA_GROUP_ENTER_CRITICAL_FROM_ISR( pxISRSpinlock, puxSavedInterruptStatus ) \
     do { \
         *( puxSavedInterruptStatus ) = portSET_INTERRUPT_MASK_FROM_ISR(); \
-        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
-        /* Take the ISR spinlock */ \
-        portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxISRSpinlock ); \
-        /* Increment the critical nesting count */ \
-        portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
+        { \
+            const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
+            /* Take the ISR spinlock */ \
+            portGET_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxISRSpinlock ) ); \
+            /* Increment the critical nesting count */ \
+            portINCREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
+        } \
     } while( 0 )
 #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */

@@ -339,27 +341,27 @@ typedef enum
  * \ingroup GranularLocks
  */
 #if ( portUSING_GRANULAR_LOCKS == 1 )
-    #define taskDATA_GROUP_EXIT_CRITICAL( pxTaskSpinlock, pxISRSpinlock ) \
-    do { \
-        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
-        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ); \
-        /* Release the ISR spinlock */ \
-        portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxISRSpinlock ); \
-        /* Release the task spinlock */ \
-        portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxTaskSpinlock ); \
-        /* Decrement the critical nesting count */ \
-        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
-        /* Enable interrupts only if the critical nesting count is 0 */ \
-        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) \
-        { \
-            portENABLE_INTERRUPTS(); \
-        } \
-        else \
-        { \
-            mtCOVERAGE_TEST_MARKER(); \
-        } \
-        /* Re-enable preemption */ \
-        vTaskPreemptionEnable( NULL ); \
+    #define taskDATA_GROUP_EXIT_CRITICAL( pxTaskSpinlock, pxISRSpinlock ) \
+    do { \
+        const BaseType_t xCoreID = ( BaseType_t ) portGET_CORE_ID(); \
+        configASSERT( portGET_CRITICAL_NESTING_COUNT( xCoreID ) > 0U ); \
+        /* Release the ISR spinlock */ \
+        portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxISRSpinlock ) ); \
+        /* Release the task spinlock */ \
+        portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \
+        /* Decrement the critical nesting count */ \
+        portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
+        /* Enable interrupts only if the critical nesting count is 0 */ \
+        if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) \
+        { \
+            portENABLE_INTERRUPTS(); \
+        } \
+        else \
+        { \
+            mtCOVERAGE_TEST_MARKER(); \
+        } \
+        /* Re-enable preemption */ \
+        vTaskPreemptionEnable( NULL ); \
     } while( 0 )
 #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */

@@ -379,14 +381,52 @@ typedef enum
             /* Decrement the critical nesting count */ \
             portDECREMENT_CRITICAL_NESTING_COUNT( xCoreID ); \
             /* Release the ISR spinlock */ \
-            portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) pxISRSpinlock ); \
+            portRELEASE_SPINLOCK( xCoreID, ( portSPINLOCK_TYPE * ) ( pxISRSpinlock ) ); \
             if( portGET_CRITICAL_NESTING_COUNT( xCoreID ) == 0 ) \
             { \
                 portCLEAR_INTERRUPT_MASK_FROM_ISR( uxSavedInterruptStatus ); \
             } \
         } while( 0 )
     #endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
 
+/**
+ * task. h
+ *
+ * Macros to lock a data group (task-level lock only).
+ *
+ * \defgroup taskDATA_GROUP_LOCK taskDATA_GROUP_LOCK
+ * \ingroup GranularLocks
+ */
+#if ( portUSING_GRANULAR_LOCKS == 1 )
+    #define taskDATA_GROUP_LOCK( pxTaskSpinlock ) \
+    do { \
+        /* Disable preemption while holding the task spinlock. */ \
+        vTaskPreemptionDisable( NULL ); \
+        { \
+            portGET_SPINLOCK( portGET_CORE_ID(), ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \
+        } \
+    } while( 0 )
+#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
+
+/**
+ * task. h
+ *
+ * Macros to unlock a data group (task-level lock only).
+ *
+ * \defgroup taskDATA_GROUP_UNLOCK taskDATA_GROUP_UNLOCK
+ * \ingroup GranularLocks
+ */
+#if ( portUSING_GRANULAR_LOCKS == 1 )
+    #define taskDATA_GROUP_UNLOCK( pxTaskSpinlock ) \
+    do { \
+        { \
+            portRELEASE_SPINLOCK( portGET_CORE_ID(), ( portSPINLOCK_TYPE * ) ( pxTaskSpinlock ) ); \
+        } \
+        /* Re-enable preemption after releasing the task spinlock. */ \
+        vTaskPreemptionEnable( NULL ); \
+    } while( 0 )
+#endif /* #if ( portUSING_GRANULAR_LOCKS == 1 ) */
+
 /*-----------------------------------------------------------
  * TASK CREATION API
  *----------------------------------------------------------*/
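
A minimal usage sketch for the new taskDATA_GROUP_LOCK/UNLOCK pair, assuming FreeRTOS.h and task.h are included, a port with granular-locks support, and a user-defined data group that embeds its own task spinlock (the struct and function below are illustrative, not from the kernel):

/* Hypothetical data group guarded by a task-level spinlock only. */
typedef struct xEXAMPLE_DATA_GROUP
{
    uint32_t ulSharedCounter;
    portSPINLOCK_TYPE xTaskSpinlock;
} ExampleDataGroup_t;

void vExampleIncrement( ExampleDataGroup_t * pxGroup )
{
    /* Disables preemption first, then takes the task spinlock: other tasks
     * are excluded, but interrupts stay enabled and ISRs can still run. */
    taskDATA_GROUP_LOCK( &( pxGroup->xTaskSpinlock ) );
    {
        pxGroup->ulSharedCounter++;
    }
    /* Releases the spinlock, then re-enables preemption (which may yield). */
    taskDATA_GROUP_UNLOCK( &( pxGroup->xTaskSpinlock ) );
}

Contrast with taskDATA_GROUP_ENTER/EXIT_CRITICAL above: the LOCK/UNLOCK pair leaves interrupts enabled, which is what lets event_groups.c and queue.c pend ISR accesses instead of masking them.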

queue.c

Lines changed: 16 additions & 18 deletions
@@ -328,25 +328,23 @@ static void prvInitialiseNewQueue( const UBaseType_t uxQueueLength,
  * When the task unlocks the queue, all pended access attempts are handled.
  */
 #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) )
-    #define queueLOCK( pxQueue )                                                   \
-    do {                                                                           \
-        vTaskPreemptionDisable( NULL );                                            \
-        prvLockQueue( ( pxQueue ) );                                               \
-        portGET_SPINLOCK( portGET_CORE_ID(), &( pxQueue->xTaskSpinlock ) );        \
+    #define queueLOCK( pxQueue )                                   \
+    do {                                                           \
+        taskDATA_GROUP_LOCK( &( ( pxQueue )->xTaskSpinlock ) );    \
+        prvLockQueue( ( pxQueue ) );                               \
     } while( 0 )
-    #define queueUNLOCK( pxQueue, xYieldAPI )                                      \
-    do {                                                                           \
-        prvUnlockQueue( ( pxQueue ) );                                             \
-        portRELEASE_SPINLOCK( portGET_CORE_ID(), &( pxQueue->xTaskSpinlock ) );    \
-        vTaskPreemptionEnable( NULL );                                             \
-        if( ( xYieldAPI ) == pdTRUE )                                              \
-        {                                                                          \
-            taskYIELD_WITHIN_API();                                                \
-        }                                                                          \
-        else                                                                       \
-        {                                                                          \
-            mtCOVERAGE_TEST_MARKER();                                              \
-        }                                                                          \
+    #define queueUNLOCK( pxQueue, xYieldAPI )                        \
+    do {                                                             \
+        prvUnlockQueue( ( pxQueue ) );                               \
+        taskDATA_GROUP_UNLOCK( &( ( pxQueue )->xTaskSpinlock ) );    \
+        if( ( xYieldAPI ) == pdTRUE )                                \
+        {                                                            \
+            taskYIELD_WITHIN_API();                                  \
+        }                                                            \
+        else                                                         \
+        {                                                            \
+            mtCOVERAGE_TEST_MARKER();                                \
+        }                                                            \
     } while( 0 )
 #else /* #if ( ( portUSING_GRANULAR_LOCKS == 1 ) && ( configNUMBER_OF_CORES > 1 ) ) */
     #define queueLOCK( pxQueue )                                     \
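
For context, a sketch of the shape in which queueLOCK()/queueUNLOCK() are called; prvExampleReceive is illustrative, and the real queue APIs do considerably more between the two calls:

/* Illustrative sketch only: the blocking-path shape around
 * queueLOCK/queueUNLOCK. Not code from the commit. */
static BaseType_t prvExampleReceive( Queue_t * const pxQueue )
{
    BaseType_t xYieldRequired = pdFALSE;

    /* Takes the queue's task spinlock via taskDATA_GROUP_LOCK, then marks
     * the queue locked so that ISR accesses are pended. */
    queueLOCK( pxQueue );
    {
        if( pxQueue->uxMessagesWaiting == ( UBaseType_t ) 0U )
        {
            /* e.g. the caller is about to block on the queue and wants the
             * unlock to yield. */
            xYieldRequired = pdTRUE;
        }
    }
    /* Drains pended ISR accesses (prvUnlockQueue), releases the lock,
     * re-enables preemption, and yields if requested. */
    queueUNLOCK( pxQueue, xYieldRequired );

    return xYieldRequired;
}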
