From 5c718542e3b60ad48b91fa297b07c754bfaa3fed Mon Sep 17 00:00:00 2001
From: Alvin Chang
Date: Sun, 18 Aug 2024 19:28:08 +0800
Subject: [PATCH] core: riscv: Remove thread_exit_user_mode()

Currently, user mode aborts and some system calls return to kernel mode
through thread_exit_user_mode(). Although this function provides a
shorter path back to kernel mode, it causes problems because it does
not update the core local flags. In particular, when
CFG_CORE_DEBUG_CHECK_STACKS=y, some checks fail because the wrong type
of stack is recorded in the core local flags.

Fix this by removing thread_exit_user_mode(), so that the core local
flags are correctly updated in the common trap handler.

Signed-off-by: Alvin Chang
Reviewed-by: Yu Chien Peter Lin
---
 .../include/kernel/thread_private_arch.h |  4 -
 core/arch/riscv/kernel/abort.c           |  7 +-
 core/arch/riscv/kernel/thread_arch.c     | 15 ++--
 core/arch/riscv/kernel/thread_rv.S       | 83 +++++++++----------
 4 files changed, 49 insertions(+), 60 deletions(-)

diff --git a/core/arch/riscv/include/kernel/thread_private_arch.h b/core/arch/riscv/include/kernel/thread_private_arch.h
index 4ba362fcd41..4770619caa4 100644
--- a/core/arch/riscv/include/kernel/thread_private_arch.h
+++ b/core/arch/riscv/include/kernel/thread_private_arch.h
@@ -128,10 +128,6 @@ static inline void thread_rpc(uint32_t rv[THREAD_RPC_NUM_ARGS])
 }
 
 void thread_scall_handler(struct thread_scall_regs *regs);
-void thread_exit_user_mode(unsigned long a0, unsigned long a1,
-                           unsigned long a2, unsigned long a3,
-                           unsigned long sp, unsigned long pc,
-                           unsigned long status);
 #endif /*__ASSEMBLER__*/
 
 #endif /*__KERNEL_THREAD_PRIVATE_ARCH_H*/
diff --git a/core/arch/riscv/kernel/abort.c b/core/arch/riscv/kernel/abort.c
index 0b9c5391c7e..c49a2930513 100644
--- a/core/arch/riscv/kernel/abort.c
+++ b/core/arch/riscv/kernel/abort.c
@@ -236,13 +236,10 @@ static void handle_user_mode_panic(struct abort_info *ai)
 	ai->regs->a0 = TEE_ERROR_TARGET_DEAD;
 	ai->regs->a1 = true;
 	ai->regs->a2 = 0xdeadbeef;
-	ai->regs->ra = (vaddr_t)thread_unwind_user_mode;
+	ai->regs->epc = (vaddr_t)thread_unwind_user_mode;
 	ai->regs->sp = thread_get_saved_thread_sp();
 	ai->regs->status = xstatus_for_xret(true, PRV_S);
-
-	thread_exit_user_mode(ai->regs->a0, ai->regs->a1, ai->regs->a2,
-			      ai->regs->a3, ai->regs->sp, ai->regs->ra,
-			      ai->regs->status);
+	ai->regs->ie = 0;
 }
 
 #ifdef CFG_WITH_VFP
diff --git a/core/arch/riscv/kernel/thread_arch.c b/core/arch/riscv/kernel/thread_arch.c
index 72c0da02922..53df6a6dd3e 100644
--- a/core/arch/riscv/kernel/thread_arch.c
+++ b/core/arch/riscv/kernel/thread_arch.c
@@ -98,8 +98,9 @@ static void thread_lazy_restore_ns_vfp(void)
 
 static void setup_unwind_user_mode(struct thread_scall_regs *regs)
 {
-	regs->ra = (uintptr_t)thread_unwind_user_mode;
+	regs->epc = (uintptr_t)thread_unwind_user_mode;
 	regs->status = xstatus_for_xret(true, PRV_S);
+	regs->ie = 0;
 	/*
 	 * We are going to exit user mode. The stack pointer must be set as the
 	 * original value it had before allocating space of scall "regs" and
@@ -135,11 +136,15 @@ void thread_scall_handler(struct thread_scall_regs *regs)
 
 	assert(sess && sess->handle_scall);
 
-	if (!sess->handle_scall(regs)) {
+	if (sess->handle_scall(regs)) {
+		/*
+		 * We're about to switch back to the next instruction after
+		 * the ecall in user mode.
+		 */
+		regs->epc += 4;
+	} else {
+		/* We're returning from __thread_enter_user_mode() */
 		setup_unwind_user_mode(regs);
-		thread_exit_user_mode(regs->a0, regs->a1, regs->a2,
-				      regs->a3, regs->sp, regs->ra,
-				      regs->status);
 	}
 }
 
diff --git a/core/arch/riscv/kernel/thread_rv.S b/core/arch/riscv/kernel/thread_rv.S
index d0e216e3f82..0952a5e0cae 100644
--- a/core/arch/riscv/kernel/thread_rv.S
+++ b/core/arch/riscv/kernel/thread_rv.S
@@ -472,27 +472,35 @@ ecall_from_user:
 	addi	t0, sp, THREAD_SCALL_REGS_SIZE
 	store_xregs a0, THREAD_CTX_KERN_SP, REG_T0
 
-	/*
-	 * We are returning to U-Mode, on return, the program counter
-	 * is set to xsepc (pc=xepc), we add 4 (size of an instruction)
-	 * to continue to next instruction.
-	 */
+	/* Restore XEPC */
 	load_xregs sp, THREAD_SCALL_REG_EPC, REG_T0
-	addi	t0, t0, 4
 	csrw	CSR_XEPC, t0
-
 	/* Restore XIE */
 	load_xregs sp, THREAD_SCALL_REG_IE, REG_T0
 	csrw	CSR_XIE, t0
 	/* Restore XSTATUS */
 	load_xregs sp, THREAD_SCALL_REG_STATUS, REG_T0
 	csrw	CSR_XSTATUS, t0
-	/* Set scratch as thread_core_local */
+	/* Check previous privilege mode by status.SPP */
+	b_if_prev_priv_is_u t0, 1f
+	/*
+	 * We are going to XRET to kernel mode.
+	 * XSCRATCH is already zero to indicate that we are in kernel mode.
+	 * We must keep kernel gp & tp, so skip restoring user gp & tp.
+	 */
+	j	2f
+1:
+	/*
+	 * We are going to XRET to user mode.
+	 * XSCRATCH must be tp (thread_core_local) to be used in next trap.
+	 * We also need to restore user gp & tp.
+	 */
 	csrw	CSR_XSCRATCH, tp
-	/* Restore caller-saved registers */
-	load_xregs sp, THREAD_SCALL_REG_RA, REG_RA
 	load_xregs sp, THREAD_SCALL_REG_GP, REG_GP
 	load_xregs sp, THREAD_SCALL_REG_TP, REG_TP
+2:
+	/* Restore remaining caller-saved registers */
+	load_xregs sp, THREAD_SCALL_REG_RA, REG_RA
 	load_xregs sp, THREAD_SCALL_REG_T0, REG_T0, REG_T2
 	load_xregs sp, THREAD_SCALL_REG_A0, REG_A0, REG_A7
 	load_xregs sp, THREAD_SCALL_REG_T3, REG_T3, REG_T6
@@ -586,18 +594,32 @@ abort_from_user:
 	/* Restore XSTATUS */
 	load_xregs sp, THREAD_ABT_REG_STATUS, REG_T0
 	csrw	CSR_XSTATUS, t0
-	/* Set scratch as thread_core_local */
-	csrw	CSR_XSCRATCH, tp
 
 	/* Update core local flags */
 	lw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
 	srli	a0, a0, THREAD_CLF_SAVED_SHIFT
 	sw	a0, THREAD_CORE_LOCAL_FLAGS(tp)
 
-	/* Restore all GPRs */
-	load_xregs sp, THREAD_ABT_REG_RA, REG_RA
+	/* Check previous privilege mode by status.SPP */
+	b_if_prev_priv_is_u t0, 1f
+	/*
+	 * We are going to XRET to kernel mode.
+	 * XSCRATCH is already zero to indicate that we are in kernel mode.
+	 * We must keep kernel gp & tp, so skip restoring user gp & tp.
+	 */
+	j	2f
+1:
+	/*
+	 * We are going to XRET to user mode.
+	 * XSCRATCH must be tp (thread_core_local) to be used in next trap.
+	 * We also need to restore user gp & tp.
+	 */
+	csrw	CSR_XSCRATCH, tp
 	load_xregs sp, THREAD_ABT_REG_GP, REG_GP
 	load_xregs sp, THREAD_ABT_REG_TP, REG_TP
+2:
+	/* Restore remaining GPRs */
+	load_xregs sp, THREAD_ABT_REG_RA, REG_RA
 	load_xregs sp, THREAD_ABT_REG_T0, REG_T0, REG_T2
 	load_xregs sp, THREAD_ABT_REG_S0, REG_S0, REG_S1
 	load_xregs sp, THREAD_ABT_REG_A0, REG_A0, REG_A7
@@ -637,37 +659,6 @@ FUNC thread_unwind_user_mode , :
 	ret
 END_FUNC thread_unwind_user_mode
 
-/*
- * void thread_exit_user_mode(unsigned long a0, unsigned long a1,
- *                            unsigned long a2, unsigned long a3,
- *                            unsigned long sp, unsigned long pc,
- *                            unsigned long status);
- */
-FUNC thread_exit_user_mode , :
-	/* Set kernel stack pointer */
-	mv	sp, a4
-
-	/* Set xSTATUS */
-	csrw	CSR_XSTATUS, a6
-
-	/*
-	 * Zeroize xSCRATCH to indicate to thread_trap_vect()
-	 * that we are executing in kernel.
-	 */
-	csrw	CSR_XSCRATCH, zero
-
-	/*
-	 * Mask all interrupts first. Interrupts will be unmasked after
-	 * returning from __thread_enter_user_mode().
-	 */
-	csrw	CSR_XIE, zero
-
-	/* Set epc as thread_unwind_user_mode() */
-	csrw	CSR_XEPC, a5
-
-	XRET
-END_FUNC thread_exit_user_mode
-
 /*
  * uint32_t __thread_enter_user_mode(struct thread_ctx_regs *regs,
  *                                   uint32_t *exit_status0,
@@ -695,7 +686,7 @@ FUNC __thread_enter_user_mode , :
 
 	/*
 	 * Save kernel stack pointer to ensure that
-	 * thread_exit_user_mode() uses correct stack pointer.
+	 * exception_from_user() uses correct stack pointer.
 	 */
 	store_xregs s0, THREAD_CTX_KERN_SP, REG_SP
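
Note on the core local flags referenced in the commit message: they record which
stack a hart is currently running on, and the common trap-exit path pops the
previously saved state (the "srli a0, a0, THREAD_CLF_SAVED_SHIFT" sequence in the
abort_from_user hunk above). The standalone C sketch below is illustrative only;
it is not OP-TEE code, and the constant names and values are hypothetical, loosely
modeled on THREAD_CORE_LOCAL_FLAGS and THREAD_CLF_SAVED_SHIFT. It shows why a
return path that bypasses the common trap-exit code leaves a stale stack state
behind, which is the kind of mismatch a CFG_CORE_DEBUG_CHECK_STACKS-style check
would then report.

/*
 * Illustrative sketch (not OP-TEE code): a per-core flags word used as a
 * small stack of "which stack am I on" states, pushed on trap entry and
 * popped on trap exit.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define CLF_SAVED_SHIFT	4	/* hypothetical width of one saved state */
#define CLF_MASK	0xf	/* low bits hold the current state */
#define CLF_TMP		0x1	/* running on the temporary stack */
#define CLF_ABORT	0x2	/* running on the abort stack */

static uint32_t core_local_flags;

/* Trap entry: remember the current state and switch to a new one */
static void push_state(uint32_t new_state)
{
	core_local_flags = (core_local_flags << CLF_SAVED_SHIFT) | new_state;
}

/* Trap exit: drop the current state, restoring the previous one */
static void pop_state(void)
{
	core_local_flags >>= CLF_SAVED_SHIFT;
}

int main(void)
{
	push_state(CLF_TMP);	/* e.g. entering the trap handler */
	push_state(CLF_ABORT);	/* e.g. a nested user-mode abort */

	/*
	 * Returning through the common trap handler pops the state, so a
	 * later stack check sees CLF_TMP again. A shortcut return that
	 * skips pop_state() would leave CLF_ABORT in place, and a debug
	 * check comparing the recorded state with the stack actually in
	 * use would fail.
	 */
	pop_state();
	assert((core_local_flags & CLF_MASK) == CLF_TMP);

	printf("flags = 0x%" PRIx32 "\n", core_local_flags);
	return 0;
}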