/*
 * Copyright 2020, Data61, CSIRO (ABN 41 687 119 230)
 * Copyright 2015, 2016 Hesham Almatary
 *
 * SPDX-License-Identifier: GPL-2.0-only
 */

/* NOTE(review): the header names of these #include directives were lost
 * (angle-bracket contents stripped, apparently by text extraction). They must
 * be restored from the original file before this translation unit will build. */
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include

/** DONT_TRANSLATE */
/*
 * Restore the current thread's saved user-level register context and return
 * to user mode with `sret`. Never returns.
 *
 * The saved registers live in an array at
 * NODE_STATE(ksCurThread)->tcbArch.tcbContext.registers; the asm below reloads
 * them by fixed word-sized slot offsets (slot 0 = ra, 1 = sp, 2 = gp, ...),
 * keeping t0 as the base pointer until the very last load.
 */
void VISIBLE NORETURN restore_user_context(void)
{
    /* Base address of the current thread's saved register array; passed into
     * the asm block and held in t0 for the whole restore sequence. */
    word_t cur_thread_reg = (word_t) NODE_STATE(ksCurThread)->tcbArch.tcbContext.registers;

    c_exit_hook();

    NODE_UNLOCK_IF_HELD;

#ifdef ENABLE_SMP_SUPPORT
    /* On SMP, read the kernel stack pointer out of sscratch and push
     * cur_thread_reg one word below its top, so the next trap entry can
     * recover the current thread's register-save area from there.
     * (On non-SMP, sscratch itself is written with t0 below instead.) */
    word_t sp;
    asm volatile("csrr %0, sscratch" : "=r"(sp));
    sp -= sizeof(word_t);
    *((word_t *)sp) = cur_thread_reg;
#endif

#ifdef CONFIG_HAVE_FPU
    /* Restore FPU state lazily, then record whether the FPU ended up enabled
     * in the thread's fs state. */
    lazyFPURestore(NODE_STATE(ksCurThread));
    set_tcb_fs_state(NODE_STATE(ksCurThread), isFpuEnable());
#endif

    asm volatile(
        /* t0 = base of the saved-register array; it is restored last. */
        "mv t0, %[cur_thread] \n"
        LOAD_S " ra, (0*%[REGSIZE])(t0) \n"
        LOAD_S " sp, (1*%[REGSIZE])(t0) \n"
        LOAD_S " gp, (2*%[REGSIZE])(t0) \n"
        /* skip tp */
        /* skip x5/t0 */
        /* no-op store conditional to clear monitor state */
        /* this may succeed in implementations with very large reservations, but the saved ra is dead */
        "sc.w zero, zero, (t0)\n"
        LOAD_S " t2, (6*%[REGSIZE])(t0) \n"
        LOAD_S " s0, (7*%[REGSIZE])(t0) \n"
        LOAD_S " s1, (8*%[REGSIZE])(t0) \n"
        LOAD_S " a0, (9*%[REGSIZE])(t0) \n"
        LOAD_S " a1, (10*%[REGSIZE])(t0) \n"
        LOAD_S " a2, (11*%[REGSIZE])(t0) \n"
        LOAD_S " a3, (12*%[REGSIZE])(t0) \n"
        LOAD_S " a4, (13*%[REGSIZE])(t0) \n"
        LOAD_S " a5, (14*%[REGSIZE])(t0) \n"
        LOAD_S " a6, (15*%[REGSIZE])(t0) \n"
        LOAD_S " a7, (16*%[REGSIZE])(t0) \n"
        LOAD_S " s2, (17*%[REGSIZE])(t0) \n"
        LOAD_S " s3, (18*%[REGSIZE])(t0) \n"
        LOAD_S " s4, (19*%[REGSIZE])(t0) \n"
        LOAD_S " s5, (20*%[REGSIZE])(t0) \n"
        LOAD_S " s6, (21*%[REGSIZE])(t0) \n"
        LOAD_S " s7, (22*%[REGSIZE])(t0) \n"
        LOAD_S " s8, (23*%[REGSIZE])(t0) \n"
        LOAD_S " s9, (24*%[REGSIZE])(t0) \n"
        LOAD_S " s10, (25*%[REGSIZE])(t0)\n"
        LOAD_S " s11, (26*%[REGSIZE])(t0)\n"
        LOAD_S " t3, (27*%[REGSIZE])(t0) \n"
        LOAD_S " t4, (28*%[REGSIZE])(t0) \n"
        LOAD_S " t5, (29*%[REGSIZE])(t0) \n"
        LOAD_S " t6, (30*%[REGSIZE])(t0) \n"
        /* Get next restored tp */
        LOAD_S " t1, (3*%[REGSIZE])(t0) \n"
        /* get restored tp */
        "add tp, t1, x0 \n"
        /* get sepc */
        /* NOTE(review): slot 34 is presumably the saved next-PC (NextIP)
         * field of the register array — confirm against the arch register
         * layout before changing. */
        LOAD_S " t1, (34*%[REGSIZE])(t0)\n"
        "csrw sepc, t1 \n"
#ifndef ENABLE_SMP_SUPPORT
        /* Write back sscratch with cur_thread_reg to get it back on the next trap entry */
        "csrw sscratch, t0 \n"
#endif
        /* slot 32 holds the saved sstatus value, written back to the CSR. */
        LOAD_S " t1, (32*%[REGSIZE])(t0) \n"
        "csrw sstatus, t1\n"
        /* restore t1 (slot 5), then t0 itself (slot 4) — the base pointer is
         * clobbered by this last load, so nothing may use it afterwards. */
        LOAD_S " t1, (5*%[REGSIZE])(t0) \n"
        LOAD_S " t0, (4*%[REGSIZE])(t0) \n"
        "sret"
        : /* no output */
        : [REGSIZE] "i"(sizeof(word_t)),
          [cur_thread] "r"(cur_thread_reg)
        : "memory"
    );

    UNREACHABLE();
}

/*
 * C entry point for interrupt traps: dispatch the pending interrupt and
 * return to user level. Never returns.
 */
void VISIBLE NORETURN c_handle_interrupt(void)
{
    /* Take the kernel lock unless the active IRQ is the remote-call IPI
     * (which is handled without acquiring the lock). */
    NODE_LOCK_IRQ_IF(getActiveIRQ() != irq_remote_call_ipi);

    c_entry_hook();

    handleInterruptEntry();

    restore_user_context();
    UNREACHABLE();
}

/*
 * C entry point for exception traps. VM faults (access/page faults on
 * instruction, load and store) go to handleVMFaultEvent(); everything else is
 * a user-level fault, except that with FPU support a fault taken while the
 * FPU is disabled is first assumed to be an FPU trap. Never returns.
 */
void VISIBLE NORETURN c_handle_exception(void)
{
    NODE_LOCK_SYS;

    c_entry_hook();

    word_t scause = read_scause();
    switch (scause) {
    case RISCVInstructionAccessFault:
    case RISCVLoadAccessFault:
    case RISCVStoreAccessFault:
    case RISCVLoadPageFault:
    case RISCVStorePageFault:
    case RISCVInstructionPageFault:
        handleVMFaultEvent(scause);
        break;
    default:
#ifdef CONFIG_HAVE_FPU
        if (!isFpuEnable()) {
            /* we assume the illegal instruction is caused by FPU first */
            handleFPUFault();
            /* Retry the faulting instruction now that the FPU is handled. */
            setNextPC(NODE_STATE(ksCurThread), getRestartPC(NODE_STATE(ksCurThread)));
            break;
        }
#endif
        handleUserLevelFault(scause, 0);
        break;
    }

    restore_user_context();
    UNREACHABLE();
}

/*
 * Slow path for syscall handling: syscall numbers outside
 * [SYSCALL_MIN, SYSCALL_MAX] are treated as unknown syscalls, everything else
 * goes to the generic handler. Never returns.
 */
void VISIBLE NORETURN slowpath(syscall_t syscall)
{
    if (unlikely(syscall < SYSCALL_MIN || syscall > SYSCALL_MAX)) {
#ifdef TRACK_KERNEL_ENTRIES
        ksKernelEntry.path = Entry_UnknownSyscall;
#endif /* TRACK_KERNEL_ENTRIES */
        /* Contrary to the name, this handles all non-standard syscalls used in
         * debug builds also. */
        handleUnknownSyscall(syscall);
    } else {
#ifdef TRACK_KERNEL_ENTRIES
        ksKernelEntry.is_fastpath = 0;
#endif /* TRACK_KERNEL_ENTRIES */
        handleSyscall(syscall);
    }

    restore_user_context();
    UNREACHABLE();
}

#ifdef CONFIG_FASTPATH
/*
 * Fastpath entry for ReplyRecv. Under MCS the reply object capability is
 * passed as a third argument. The fastpath itself returns to user level, so
 * this function never returns.
 */
ALIGN(L1_CACHE_LINE_SIZE)
#ifdef CONFIG_KERNEL_MCS
void VISIBLE c_handle_fastpath_reply_recv(word_t cptr, word_t msgInfo, word_t reply)
#else
void VISIBLE c_handle_fastpath_reply_recv(word_t cptr, word_t msgInfo)
#endif
{
    NODE_LOCK_SYS;

    c_entry_hook();
#ifdef TRACK_KERNEL_ENTRIES
    benchmark_debug_syscall_start(cptr, msgInfo, SysReplyRecv);
    ksKernelEntry.is_fastpath = 1;
#endif /* DEBUG */

#ifdef CONFIG_KERNEL_MCS
    fastpath_reply_recv(cptr, msgInfo, reply);
#else
    fastpath_reply_recv(cptr, msgInfo);
#endif

    UNREACHABLE();
}

/*
 * Fastpath entry for Call. The fastpath returns to user level itself, so this
 * function never returns.
 */
ALIGN(L1_CACHE_LINE_SIZE)
void VISIBLE c_handle_fastpath_call(word_t cptr, word_t msgInfo)
{
    NODE_LOCK_SYS;

    c_entry_hook();
#ifdef TRACK_KERNEL_ENTRIES
    benchmark_debug_syscall_start(cptr, msgInfo, SysCall);
    ksKernelEntry.is_fastpath = 1;
#endif /* DEBUG */

    fastpath_call(cptr, msgInfo);

    UNREACHABLE();
}
#endif

/*
 * Generic C syscall entry: record the entry (when tracking is enabled) and
 * dispatch to the slow path, which returns to user level. Never returns.
 */
void VISIBLE NORETURN c_handle_syscall(word_t cptr, word_t msgInfo, syscall_t syscall)
{
    NODE_LOCK_SYS;

    c_entry_hook();
#ifdef TRACK_KERNEL_ENTRIES
    benchmark_debug_syscall_start(cptr, msgInfo, syscall);
    ksKernelEntry.is_fastpath = 0;
#endif /* DEBUG */

    slowpath(syscall);

    UNREACHABLE();
}