Skip to content

Commit

Permalink
arch: riscv: core: run zephyr completely in user/supervisor mode
Browse files Browse the repository at this point in the history
Before this fix, Zephyr could start only in machine mode.
With this fix, Zephyr can start (as a guest) in user or supervisor mode.
Fixes zephyrproject-rtos#68133

Signed-off-by: Sven Ginka <[email protected]>
  • Loading branch information
tswaehn committed Aug 8, 2024
1 parent ed018f8 commit c1e4186
Show file tree
Hide file tree
Showing 13 changed files with 121 additions and 42 deletions.
12 changes: 12 additions & 0 deletions arch/riscv/Kconfig
Original file line number Diff line number Diff line change
Expand Up @@ -206,6 +206,18 @@ config RISCV_GENERIC_TOOLCHAIN
Allow SOCs that have custom extended riscv ISA to still
compile with generic riscv32 toolchain.

config RISCV_KERNEL_IN_USER_MODE
bool "Start zephyr in user mode"
help
Replaces machine-mode CSR accesses with user-mode CSR accesses.
Allows Zephyr to run as a guest in user mode only.

config RISCV_KERNEL_IN_SUPERVISOR_MODE
bool "Start zephyr in supervisor mode"
help
Replaces machine-mode CSR accesses with supervisor-mode CSR accesses.
Allows Zephyr to run as a guest in supervisor mode only.

config GEN_ISR_TABLES
default y

Expand Down
2 changes: 1 addition & 1 deletion arch/riscv/core/cpu_idle.c
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ void __weak arch_cpu_idle(void)
{
sys_trace_idle();
__asm__ volatile("wfi");
irq_unlock(MSTATUS_IEN);
irq_unlock(XSTATUS_IEN);
}

void __weak arch_cpu_atomic_idle(unsigned int key)
Expand Down
4 changes: 2 additions & 2 deletions arch/riscv/core/fatal.c
Original file line number Diff line number Diff line change
Expand Up @@ -79,8 +79,8 @@ FUNC_NORETURN void z_riscv_fatal_error_csf(unsigned int reason, const struct arc
#endif /* CONFIG_RISCV_ISA_RV32E */
LOG_ERR(" sp: " PR_REG, z_riscv_get_sp_before_exc(esf));
LOG_ERR(" ra: " PR_REG, esf->ra);
LOG_ERR(" mepc: " PR_REG, esf->mepc);
LOG_ERR("mstatus: " PR_REG, esf->mstatus);
LOG_ERR(" xepc: " PR_REG, esf->xepc);
LOG_ERR("xstatus: " PR_REG, esf->xstatus);
LOG_ERR("");
}

Expand Down
8 changes: 4 additions & 4 deletions arch/riscv/core/irq_manage.c
Original file line number Diff line number Diff line change
Expand Up @@ -19,15 +19,15 @@ LOG_MODULE_DECLARE(os, CONFIG_KERNEL_LOG_LEVEL);

FUNC_NORETURN void z_irq_spurious(const void *unused)
{
unsigned long mcause;
unsigned long xcause;

ARG_UNUSED(unused);

mcause = csr_read(mcause);
xcause = csr_read(xcause);

mcause &= CONFIG_RISCV_MCAUSE_EXCEPTION_MASK;
xcause &= CONFIG_RISCV_MCAUSE_EXCEPTION_MASK;

LOG_ERR("Spurious interrupt detected! IRQ: %ld", mcause);
LOG_ERR("Spurious interrupt detected! IRQ: %ld", xcause);
#if defined(CONFIG_RISCV_HAS_PLIC)
if (mcause == RISCV_IRQ_MEXT) {
unsigned int save_irq = riscv_plic_get_irq();
Expand Down
35 changes: 18 additions & 17 deletions arch/riscv/core/isr.S
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,7 @@
* Copyright (c) 2016 Jean-Paul Etienne <[email protected]>
* Copyright (c) 2018 Foundries.io Ltd
* Copyright (c) 2020 BayLibre, SAS
* Copyright (c) 2024 sensry GmbH
*
* SPDX-License-Identifier: Apache-2.0
*/
Expand Down Expand Up @@ -189,12 +190,12 @@ SECTION_FUNC(exception.entry, _isr_wrapper)
get_current_cpu s0

/* Save MEPC register */
csrr t0, mepc
sr t0, __struct_arch_esf_mepc_OFFSET(sp)
csrr t0, xepc
sr t0, __struct_arch_esf_xepc_OFFSET(sp)

/* Save MSTATUS register */
csrr t2, mstatus
sr t2, __struct_arch_esf_mstatus_OFFSET(sp)
csrr t2, xstatus
sr t2, __struct_arch_esf_xstatus_OFFSET(sp)

#if defined(CONFIG_FPU_SHARING)
/* determine if FPU access was disabled */
Expand Down Expand Up @@ -319,7 +320,7 @@ no_fp: /* increment _current->arch.exception_depth */
jal ra, __soc_is_irq
bnez a0, is_interrupt
#else
csrr t0, mcause
csrr t0, xcause
srli t0, t0, RISCV_MCAUSE_IRQ_POS
bnez t0, is_interrupt
#endif
Expand All @@ -329,7 +330,7 @@ no_fp: /* increment _current->arch.exception_depth */
* perform a context-switch or an IRQ offload. Otherwise call _Fault
* to report the exception.
*/
csrr t0, mcause
csrr t0, xcause
li t2, CONFIG_RISCV_MCAUSE_EXCEPTION_MASK
and t0, t0, t2

Expand Down Expand Up @@ -385,9 +386,9 @@ is_kernel_syscall:
* It's safe to always increment by 4, even with compressed
* instructions, because the ecall instruction is always 4 bytes.
*/
lr t0, __struct_arch_esf_mepc_OFFSET(sp)
lr t0, __struct_arch_esf_xepc_OFFSET(sp)
addi t0, t0, 4
sr t0, __struct_arch_esf_mepc_OFFSET(sp)
sr t0, __struct_arch_esf_xepc_OFFSET(sp)

#ifdef CONFIG_PMP_STACK_GUARD
/* Re-activate PMP for m-mode */
Expand Down Expand Up @@ -505,9 +506,9 @@ is_user_syscall:
* Same as for is_kernel_syscall: increment saved MEPC by 4 to
* prevent triggering the same ecall again upon exiting the ISR.
*/
lr t1, __struct_arch_esf_mepc_OFFSET(sp)
lr t1, __struct_arch_esf_xepc_OFFSET(sp)
addi t1, t1, 4
sr t1, __struct_arch_esf_mepc_OFFSET(sp)
sr t1, __struct_arch_esf_xepc_OFFSET(sp)

/* Restore argument registers from user stack */
lr a0, __struct_arch_esf_a0_OFFSET(sp)
Expand Down Expand Up @@ -565,7 +566,7 @@ is_interrupt:
* If we came from userspace then we need to reconfigure the
* PMP for kernel mode stack guard.
*/
lr t0, __struct_arch_esf_mstatus_OFFSET(sp)
lr t0, __struct_arch_esf_xstatus_OFFSET(sp)
li t1, MSTATUS_MPP
and t0, t0, t1
bnez t0, 1f
Expand Down Expand Up @@ -609,7 +610,7 @@ on_irq_stack:
#endif

/* Get IRQ causing interrupt */
csrr a0, mcause
csrr a0, xcause
li t0, CONFIG_RISCV_MCAUSE_EXCEPTION_MASK
and a0, a0, t0

Expand Down Expand Up @@ -714,10 +715,10 @@ fp_trap_exit:
#endif

/* Restore MEPC and MSTATUS registers */
lr t0, __struct_arch_esf_mepc_OFFSET(sp)
lr t2, __struct_arch_esf_mstatus_OFFSET(sp)
csrw mepc, t0
csrw mstatus, t2
lr t0, __struct_arch_esf_xepc_OFFSET(sp)
lr t2, __struct_arch_esf_xstatus_OFFSET(sp)
csrw xepc, t0
csrw xstatus, t2

#ifdef CONFIG_USERSPACE
/*
Expand Down Expand Up @@ -775,4 +776,4 @@ fp_trap_exit:

#endif /* CONFIG_RISCV_SOC_HAS_ISR_STACKING */

mret
xret
6 changes: 3 additions & 3 deletions arch/riscv/core/offsets/offsets.c
Original file line number Diff line number Diff line change
Expand Up @@ -13,9 +13,9 @@
* structures.
*/

#include <zephyr/arch/exception.h>
#include <zephyr/kernel.h>
#include <kernel_arch_data.h>
#include <zephyr/arch/exception.h>
#include <gen_offset.h>

#ifdef CONFIG_RISCV_SOC_CONTEXT_SAVE
Expand Down Expand Up @@ -109,8 +109,8 @@ GEN_OFFSET_STRUCT(arch_esf, a6);
GEN_OFFSET_STRUCT(arch_esf, a7);
#endif /* !CONFIG_RISCV_ISA_RV32E */

GEN_OFFSET_STRUCT(arch_esf, mepc);
GEN_OFFSET_STRUCT(arch_esf, mstatus);
GEN_OFFSET_STRUCT(arch_esf, xepc);
GEN_OFFSET_STRUCT(arch_esf, xstatus);

GEN_OFFSET_STRUCT(arch_esf, s0);

Expand Down
2 changes: 1 addition & 1 deletion arch/riscv/core/reset.S
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ SECTION_FUNC(reset, __reset)
* the C domain
*/
SECTION_FUNC(TEXT, __initialize)
csrr a0, mhartid
csrr a0, xhartid
li t0, CONFIG_RV_BOOT_HART
beq a0, t0, boot_first_core
j boot_secondary_core
Expand Down
2 changes: 1 addition & 1 deletion arch/riscv/core/stacktrace.c
Original file line number Diff line number Diff line change
Expand Up @@ -165,7 +165,7 @@ static void walk_stackframe(stack_trace_callback_fn cb, void *cookie, const stru
if (esf != NULL) {
/* Unwind the provided exception stack frame */
sp = z_riscv_get_sp_before_exc(esf);
ra = esf->mepc;
ra = esf->xepc;
} else if ((csf == NULL) || (csf == &_current->callee_saved)) {
/* Unwind current thread (default case when nothing is provided ) */
sp = current_stack_pointer;
Expand Down
6 changes: 3 additions & 3 deletions arch/riscv/core/thread.c
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
* counter will be restored following the MEPC value set within the
* thread stack.
*/
stack_init->mstatus = MSTATUS_DEF_RESTORE;
stack_init->xstatus = XSTATUS_DEF_RESTORE;

#if defined(CONFIG_FPU_SHARING)
/* thread birth happens through the exception return path */
Expand All @@ -83,11 +83,11 @@ void arch_new_thread(struct k_thread *thread, k_thread_stack_t *stack,
if (IS_ENABLED(CONFIG_USERSPACE)
&& (thread->base.user_options & K_USER)) {
/* User thread */
stack_init->mepc = (unsigned long)k_thread_user_mode_enter;
stack_init->xepc = (unsigned long)k_thread_user_mode_enter;

} else {
/* Supervisor thread */
stack_init->mepc = (unsigned long)z_thread_entry;
stack_init->xepc = (unsigned long)z_thread_entry;

#if defined(CONFIG_PMP_STACK_GUARD)
/* Enable PMP in mstatus.MPRV mode for RISC-V machine mode
Expand Down
20 changes: 15 additions & 5 deletions include/zephyr/arch/riscv/arch.h
Original file line number Diff line number Diff line change
Expand Up @@ -166,6 +166,12 @@
#define MSTATUS_FS_CLEAN (2UL << 13)
#define MSTATUS_FS_DIRTY (3UL << 13)

#define SSTATUS_IEN (1UL << 1)
#define SSTATUS_UPIE_IEN (1UL << 5)

#define USTATUS_IEN (1UL << 0)
#define USTATUS_UPIE_IEN (1UL << 4)

/* This comes from openisa_rv32m1, but doesn't seem to hurt on other
* platforms:
* - Preserve machine privileges in MPP. If you see any documentation
Expand All @@ -175,6 +181,8 @@
* by setting MPIE now, so it will be copied into IE on mret.
*/
#define MSTATUS_DEF_RESTORE (MSTATUS_MPP_M | MSTATUS_MPIE_EN)
#define SSTATUS_DEF_RESTORE (SSTATUS_UPIE_IEN | SSTATUS_IEN)
#define USTATUS_DEF_RESTORE (USTATUS_UPIE_IEN | USTATUS_IEN)

#ifndef _ASMLANGUAGE
#include <zephyr/sys/util.h>
Expand Down Expand Up @@ -238,9 +246,9 @@ static ALWAYS_INLINE unsigned int arch_irq_lock(void)
#else
unsigned int key;

__asm__ volatile ("csrrc %0, mstatus, %1"
__asm__ volatile ("csrrc %0, %1, %2"
: "=r" (key)
: "rK" (MSTATUS_IEN)
: "i" (XSTATUS), "rK" (XSTATUS_IEN)
: "memory");

return key;
Expand All @@ -254,21 +262,23 @@ static ALWAYS_INLINE unsigned int arch_irq_lock(void)
static ALWAYS_INLINE void arch_irq_unlock(unsigned int key)
{
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
extern void z_soc_irq_unlock(unsigned int key);
z_soc_irq_unlock(key);
#else
__asm__ volatile ("csrs mstatus, %0"
__asm__ volatile ("csrs %0, %1"
:
: "r" (key & MSTATUS_IEN)
: "i" (XSTATUS), "r" (key & XSTATUS_IEN)
: "memory");
#endif
}

static ALWAYS_INLINE bool arch_irq_unlocked(unsigned int key)
{
#ifdef CONFIG_RISCV_SOC_HAS_CUSTOM_IRQ_LOCK_OPS
extern bool z_soc_irq_unlocked(unsigned int key);
return z_soc_irq_unlocked(key);
#else
return (key & MSTATUS_IEN) != 0;
return (key & XSTATUS_IEN) != 0;
#endif
}

Expand Down
56 changes: 56 additions & 0 deletions include/zephyr/arch/riscv/csr.h
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2020 Michael Schaffner
* Copyright (c) 2020 BayLibre, SAS
* Copyright (c) 2024 sensry GmbH
*
* SPDX-License-Identifier: SHL-0.51
* SPDX-License-Identifier: Apache-2.0
Expand Down Expand Up @@ -233,4 +234,59 @@
: "memory"); \
})

/* uhartid -- user hardware thread ID; does not appear to be defined in the toolchain yet */
#ifdef CONFIG_RISCV_KERNEL_IN_USER_MODE
/* zephyr runs in user mode */

/* inline assembly defines */
#define XSTATUS_IEN USTATUS_IEN
#define XSTATUS_DEF_RESTORE USTATUS_DEF_RESTORE
#define XSTATUS 0x000

/* register definition for assembly */
#define uhartid 0x014

#define xhartid uhartid
#define xscratch uscratch
#define xstatus ustatus
#define xepc uepc
#define xcause ucause
#define xret uret

#elif CONFIG_RISCV_KERNEL_IN_SUPERVISOR_MODE
/* zephyr runs in supervisor mode */

/* inline assembly defines */
#define XSTATUS_IEN SSTATUS_IEN
#define XSTATUS_DEF_RESTORE SSTATUS_DEF_RESTORE
#define XSTATUS 0x000

/* register definition for assembly */
#define shartid 0x7D2

#define xhartid shartid
#define xscratch sscratch
#define xstatus sstatus
#define xepc 0x141
#define xcause scause
#define xret sret

#else
/* default: zephyr runs in machine mode */

/* inline assembly defines */
#define XSTATUS_IEN MSTATUS_IEN
#define XSTATUS_DEF_RESTORE MSTATUS_DEF_RESTORE
#define XSTATUS 0x300

/* register definition for assembly */
#define xhartid mhartid
#define xscratch mscratch
#define xstatus mstatus
#define xepc mepc
#define xcause mcause
#define xret mret

#endif

#endif /* CSR_H_ */
4 changes: 2 additions & 2 deletions include/zephyr/arch/riscv/exception.h
Original file line number Diff line number Diff line change
Expand Up @@ -72,8 +72,8 @@ struct arch_esf {
unsigned long a7; /* function argument */
#endif /* !CONFIG_RISCV_ISA_RV32E */

unsigned long mepc; /* machine exception program counter */
unsigned long mstatus; /* machine status register */
unsigned long xepc; /* machine exception program counter */
unsigned long xstatus; /* machine status register */

unsigned long s0; /* callee-saved s0 */

Expand Down
6 changes: 3 additions & 3 deletions soc/nordic/common/vpr/soc_isr_stacking.h
Original file line number Diff line number Diff line change
Expand Up @@ -91,16 +91,16 @@

#define STORE_SP_ALIGN_BIT_FROM_MEPC \
addi t1, sp, __struct_arch_esf_soc_context_OFFSET; \
lr t0, __struct_arch_esf_mepc_OFFSET(sp); \
lr t0, __struct_arch_esf_xepc_OFFSET(sp); \
andi t0, t0, MEPC_SP_ALIGN_BIT_MASK; \
sr t0, __soc_esf_t_sp_align_OFFSET(t1)

#define RESTORE_SP_ALIGN_BIT_TO_MEPC \
addi t1, sp, __struct_arch_esf_soc_context_OFFSET; \
lr t0, __soc_esf_t_sp_align_OFFSET(t1); \
lr t1, __struct_arch_esf_mepc_OFFSET(sp); \
lr t1, __struct_arch_esf_xepc_OFFSET(sp); \
or t2, t1, t0; \
sr t2, __struct_arch_esf_mepc_OFFSET(sp)
sr t2, __struct_arch_esf_xepc_OFFSET(sp)

#define SOC_ISR_SW_STACKING \
csrw mscratch, t0; \
Expand Down

0 comments on commit c1e4186

Please sign in to comment.