Bug Fix - Adding support for Zynq MPSoC (A53*4) (FreeRTOS#14)
Bug fix: the program would crash when the GCC -O2/-O3 optimization options were used.
Improvement: make lSpinTrylock and vSpinUnlock inline.
yunyafeng authored Jun 26, 2024
1 parent 43fd801 commit 5382412
Showing 3 changed files with 129 additions and 149 deletions.
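
The root cause of the -O2/-O3 crash is a classic one: uxPortSchedularRunning is written by one core and polled by another, and without volatile the optimizer is free to hoist the load out of the polling loop and spin on a stale register copy. A minimal sketch of the pattern (hypothetical names, not code from this commit):

#include <stdint.h>

/* Without volatile, gcc -O2 may read this once and then spin on a
 * register copy, never observing the other core's write. */
volatile uint64_t uxFlag = 0;

void vWaitForFlag( void )
{
    while( uxFlag == 0 )
    {
        /* volatile forces a fresh load from memory on every pass. */
    }
}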
127 changes: 126 additions & 1 deletion GCC/CORTEX_A53_64-bit_UltraScale_MPSoC/port.c
@@ -477,7 +477,8 @@ int32_t lReturn;
/*-----------------------------------------------------------*/

/* Flag that controls tick ISR handling; set to pdTRUE just before the scheduler starts. */
uint64_t uxPortSchedularRunning = pdFALSE;

volatile uint64_t uxPortSchedularRunning = pdFALSE;

BaseType_t xPortStartScheduler( void )
{
@@ -780,6 +781,130 @@ void vIRQHandler( uint32_t ulICCIAR )
uxCoreInIsr[xCoreId]--;
}

#define portRTOS_LOCK_COUNT (2u)
#define portMAX_CORE_COUNT configNUMBER_OF_CORES

/* Bitmask of the locks each core currently owns */
volatile uint64_t ucOwnedByCore[ portMAX_CORE_COUNT ];
/* Recursion count for each lock */
volatile uint64_t ucRecursionCountByLock[ portRTOS_LOCK_COUNT ];
/* Index 0 is used for ISR lock and Index 1 is used for task lock */
uint32_t ulGateWord[ portRTOS_LOCK_COUNT ];


static inline void vSpinUnlock(uint32_t* ulLock)
{
__asm volatile(
"dmb sy\n"
"mov w1, #0\n"
"str w1, [%x0]\n"
"dsb sy\n"
"sev\n"
:
:"r" (ulLock)
: "memory", "w1"
);
}

static inline int32_t lSpinTrylock(uint32_t* ulLock)
{
register int32_t lRet;

__asm volatile(
"1:\n"
"ldxr w1, [%x1]\n"
"cmp w1, #1\n"
"beq 2f\n"
"mov w2, #1\n"
"stxr w1, w2, [%x1]\n"
"cmp w1, #0\n"
"bne 1b\n"
"2:\n"
"mov %w0, w1\n"
:"=r" (lRet)
:"r" (ulLock)
: "memory", "w1", "w2"
);

return lRet;
}
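
For readers less used to the ldxr/stxr exclusives, the two helpers above behave roughly like the following C11-atomics sketch (an illustration only, assuming a sequentially consistent compare-exchange; it is not a drop-in replacement, and it omits the WFE/SEV wake-up protocol the asm relies on):

#include <stdatomic.h>
#include <stdint.h>

static inline int32_t lSpinTrylockC11( atomic_uint * pulLock )
{
    unsigned int uxExpected = 0u;

    /* Same convention as the asm version: 0 on success (lock taken),
     * non-zero if the lock was already held. */
    return atomic_compare_exchange_strong( pulLock, &uxExpected, 1u ) ? 0 : 1;
}

static inline void vSpinUnlockC11( atomic_uint * pulLock )
{
    /* Release store; the asm version additionally executes SEV to wake
     * cores parked in WFE. */
    atomic_store_explicit( pulLock, 0u, memory_order_release );
}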

/* Read 64b value shared between cores */
static inline uint64_t uxGet64(volatile uint64_t* x)
{
__asm("dsb sy");
return *x;
}

/* Write 64b value shared between cores */
static inline void vSet64(volatile uint64_t* x, uint64_t value)
{
*x = value;
__asm("dsb sy");
}
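
In GCC-builtin terms these two helpers play roughly the role of the following (an approximation only: dsb sy is a stronger, system-wide barrier than the instruction sequences the __atomic_* builtins typically emit on AArch64):

static inline uint64_t uxGet64Builtin( volatile uint64_t * x )
{
    return __atomic_load_n( x, __ATOMIC_SEQ_CST );
}

static inline void vSet64Builtin( volatile uint64_t * x, uint64_t value )
{
    __atomic_store_n( x, value, __ATOMIC_SEQ_CST );
}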

void vPortRecursiveLock(uint32_t ulLockNum, BaseType_t uxAcquire)
{
uint32_t ulCoreNum = (uint32_t)portGET_CORE_ID();
uint32_t ulLockBit = 1u << ulLockNum;

/* Lock acquire */
if (uxAcquire)
{
/* Try to take the spinlock. */
/* If it is not available, check whether this core already owns it. */
/* If the core owns the lock, just increment the recursion count. */
/* Otherwise spin (WFE) until the spinlock is released. */
if( lSpinTrylock(&ulGateWord[ulLockNum]) != 0)
{
/* Check if the core owns the spinlock */
if( uxGet64(&ucOwnedByCore[ulCoreNum]) & ulLockBit )
{
configASSERT( uxGet64(&ucRecursionCountByLock[ulLockNum]) != 255u);
vSet64(&ucRecursionCountByLock[ulLockNum], (uxGet64(&ucRecursionCountByLock[ulLockNum])+1));
return;
}

/* Preload the gate word into the cache */
uint32_t dummy = ulGateWord[ulLockNum];
dummy++;

while (lSpinTrylock(&ulGateWord[ulLockNum]) != 0)
__asm volatile ("wfe");
}

/* Add barrier to ensure lock is taken before we proceed */
__asm__ __volatile__ ( "dmb sy" ::: "memory" );

/* Assert the lock count is 0 when the spinlock is free and is acquired */
configASSERT(uxGet64(&ucRecursionCountByLock[ulLockNum]) == 0);

/* Set lock count as 1 */
vSet64(&ucRecursionCountByLock[ulLockNum], 1);
/* Set ucOwnedByCore */
vSet64(&ucOwnedByCore[ulCoreNum], (uxGet64(&ucOwnedByCore[ulCoreNum]) | ulLockBit));
}
/* Lock release */
else
{
/* Assert the lock is not free already */
configASSERT( (uxGet64(&ucOwnedByCore[ulCoreNum]) & ulLockBit) != 0 );
configASSERT( uxGet64(&ucRecursionCountByLock[ulLockNum]) != 0 );

/* Reduce ucRecursionCountByLock by 1 */
vSet64(&ucRecursionCountByLock[ulLockNum], (uxGet64(&ucRecursionCountByLock[ulLockNum]) - 1) );

if( !uxGet64(&ucRecursionCountByLock[ulLockNum]) )
{
vSet64(&ucOwnedByCore[ulCoreNum], (uxGet64(&ucOwnedByCore[ulCoreNum]) & ~ulLockBit));
vSpinUnlock(&ulGateWord[ulLockNum]);
__asm volatile("sev");
/* Add barrier to ensure the release is visible before we proceed */
__asm__ __volatile__ ( "dmb sy" ::: "memory" );
}
}
}
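
The kernel reaches this function through the portGET_ISR_LOCK()/portRELEASE_ISR_LOCK() macro family shown at the end of the portmacro.h diff below; a short sketch of the recursion semantics, assuming all four calls run on the same core:

/* Taking the same lock twice from one core does not deadlock: the
 * second call only increments ucRecursionCountByLock and returns. */
vPortRecursiveLock( 0u /* ISR_LOCK */, pdTRUE );
vPortRecursiveLock( 0u /* ISR_LOCK */, pdTRUE );

/* Releases are symmetric; the gate word is cleared (and SEV issued)
 * only when the recursion count drops back to zero. */
vPortRecursiveLock( 0u /* ISR_LOCK */, pdFALSE );
vPortRecursiveLock( 0u /* ISR_LOCK */, pdFALSE );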


#if( configGENERATE_RUN_TIME_STATS == 1 )
/*
40 changes: 0 additions & 40 deletions GCC/CORTEX_A53_64-bit_UltraScale_MPSoC/portASM.S
@@ -517,46 +517,6 @@ Exit_IRQ_No_Context_Switch:
ERET



.align 8


/*
* int32_t lSpinTrylock(uintptr_t ulGateWord);
*/
.global lSpinTrylock
.type lSpinTrylock , %function
lSpinTrylock:
LDXR W1, [X0]
CMP W1, #1 /* is locked already? */
BEQ 1f /* if so, leave with fail */

MOV W2, #1 /* locked = 1 */
STXR W1, W2, [X0]/* if so attempt to grab it */
CMP W1, #0 /* did we get it? zero is yes */

/* if not, loop while in contention */
BNE lSpinTrylock
1:
MOV W0, W1
DMB SY
RET

/*
* void vSpinUnlock(uintptr_t ulGateWord);
*/
.global vSpinUnlock
.type vSpinUnlock , %function
vSpinUnlock:
DMB SY

MOV W1, #0
STR W1, [X0]

DSB SY
SEV /* let everyone know */
RET

pxCurrentTCBConst: .dword pxCurrentTCBs
ullPortTaskHasFPUContextConst: .dword ullPortTaskHasFPUContext
ullMaxAPIPriorityMaskConst: .dword ullMaxAPIPriorityMask
111 changes: 3 additions & 108 deletions GCC/CORTEX_A53_64-bit_UltraScale_MPSoC/portmacro.h
@@ -101,16 +101,6 @@ extern uint64_t ullPortYieldRequired[];
#define portYIELD() __asm volatile ( "SMC 0" ::: "memory" )
#endif

/*
* int32_t lSpinTrylock(uintptr_t ulGateWord);
*/
// .global xPortGetCoreID
// .type xPortGetCoreID , %function
// xPortGetCoreID:
// MRS X0, MPIDR_EL1
// AND X0, X0, #0xFF
// RET


static inline UBaseType_t uxDisableInterrupts()
{
@@ -119,8 +109,6 @@ static inline UBaseType_t uxDisableInterrupts()
__asm volatile (
"mrs %0, daif\n"
"msr daifset, #2\n"
// "dsb sy\n"
// "isb sy\n"
: "=r" (flags)
:
: "memory"
@@ -132,10 +120,7 @@ static inline void vEnableInterrupts()
static inline void vEnableInterrupts()
{
__asm volatile (
"mrs x0, daif\n"
"msr daifclr, #2\n"
// "dsb sy\n"
// "isb sy\n"
:
:
: "memory"
@@ -150,11 +135,9 @@ static inline void vRestoreInterrupts(UBaseType_t flags)
"bic x1, x1, #128\n"
"orr x1, x1, x2\n"
"msr daif, x1\n"
// "dsb sy\n"
// "isb sy\n"
:
: "r" (flags)
: "memory"
: "x0","x1","x2","memory"
);
}
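
Together, the three helpers implement the usual save/mask/restore discipline for the DAIF I bit; a minimal usage sketch:

/* Mask IRQs, remembering the previous DAIF state... */
UBaseType_t uxSaved = uxDisableInterrupts();

/* ...execute a short critical section with IRQs masked... */

/* ...then restore exactly the state that was saved. */
vRestoreInterrupts( uxSaved );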

@@ -168,7 +151,7 @@ static inline BaseType_t xPortGetCoreID()
"and %0, x0, #0xff\n"
: "=r" (xCoreID)
:
: "memory"
: "memory", "x0"
);

return xCoreID;
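
The Aff0 field of MPIDR_EL1 read here is the core number (0-3 on the ZynqMP's four A53s); the port uses it to index per-core state, for example:

uint32_t ulCore = ( uint32_t ) xPortGetCoreID();

uxCoreInIsr[ ulCore ]++;    /* per-core ISR nesting depth, as in vIRQHandler */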
@@ -318,97 +301,9 @@
*----------------------------------------------------------*/
#define ISR_LOCK (0u)
#define TASK_LOCK (1u)
#define portRTOS_LOCK_COUNT (2u)
#define portMAX_CORE_COUNT configNUMBER_OF_CORES

/* Bitmask of the locks each core currently owns */
volatile uint64_t ucOwnedByCore[ portMAX_CORE_COUNT ];
/* Recursion count for each lock */
volatile uint64_t ucRecursionCountByLock[ portRTOS_LOCK_COUNT ];
/* Index 0 is used for ISR lock and Index 1 is used for task lock */
uint32_t ulGateWord[ portRTOS_LOCK_COUNT ];

void vSpinLock(uint32_t* ulGateWord);
int32_t lSpinTrylock(uint32_t* ulGateWord);
void vSpinUnlock(uint32_t* ulGateWord);

/* Read 64b value shared between cores */
static inline uint64_t uxGet64(volatile uint64_t* x)
{
__asm("dsb sy");
return *x;
}

/* Write 64b value shared between cores */
static inline void vSet64(volatile uint64_t* x, uint64_t value)
{
*x = value;
__asm("dsb sy");
}

// TODO inline spinlocks
extern void vPortRecursiveLock(uint32_t ulLockNum, BaseType_t uxAcquire);

static inline void vPortRecursiveLock(uint32_t ulLockNum, BaseType_t uxAcquire)
{
uint32_t ulCoreNum = (uint32_t)portGET_CORE_ID();
uint32_t ulLockBit = 1u << ulLockNum;

/* Lock acquire */
if (uxAcquire)
{
/* Try to take the spinlock. */
/* If it is not available, check whether this core already owns it. */
/* If the core owns the lock, just increment the recursion count. */
/* Otherwise spin (WFE) until the spinlock is released. */
if( lSpinTrylock(&ulGateWord[ulLockNum]) != 0)
{
/* Check if the core owns the spinlock */
if( uxGet64(&ucOwnedByCore[ulCoreNum]) & ulLockBit )
{
configASSERT( uxGet64(&ucRecursionCountByLock[ulLockNum]) != 255u);
vSet64(&ucRecursionCountByLock[ulLockNum], (uxGet64(&ucRecursionCountByLock[ulLockNum])+1));
return;
}

/* Preload the gate word into the cache */
uint32_t dummy = ulGateWord[ulLockNum];
dummy++;

while (lSpinTrylock(&ulGateWord[ulLockNum]) != 0)
__asm volatile ("wfe");
}

/* Add barrier to ensure lock is taken before we proceed */
__asm__ __volatile__ ( "dmb sy" ::: "memory" );

/* Assert the lock count is 0 when the spinlock is free and is acquired */
configASSERT(uxGet64(&ucRecursionCountByLock[ulLockNum]) == 0);

/* Set lock count as 1 */
vSet64(&ucRecursionCountByLock[ulLockNum], 1);
/* Set ucOwnedByCore */
vSet64(&ucOwnedByCore[ulCoreNum], (uxGet64(&ucOwnedByCore[ulCoreNum]) | ulLockBit));
}
/* Lock release */
else
{
/* Assert the lock is not free already */
configASSERT( (uxGet64(&ucOwnedByCore[ulCoreNum]) & ulLockBit) != 0 );
configASSERT( uxGet64(&ucRecursionCountByLock[ulLockNum]) != 0 );

/* Reduce ucRecursionCountByLock by 1 */
vSet64(&ucRecursionCountByLock[ulLockNum], (uxGet64(&ucRecursionCountByLock[ulLockNum]) - 1) );

if( !uxGet64(&ucRecursionCountByLock[ulLockNum]) )
{
vSet64(&ucOwnedByCore[ulCoreNum], (uxGet64(&ucOwnedByCore[ulCoreNum]) & ~ulLockBit));
vSpinUnlock(&ulGateWord[ulLockNum]);
__asm volatile("sev");
/* Add barrier to ensure the release is visible before we proceed */
__asm__ __volatile__ ( "dmb sy" ::: "memory" );
}
}
}

#define portRELEASE_ISR_LOCK() vPortRecursiveLock(ISR_LOCK, pdFALSE)
#define portGET_ISR_LOCK() vPortRecursiveLock(ISR_LOCK, pdTRUE)
