summaryrefslogtreecommitdiff
path: root/x86_64
diff options
context:
space:
mode:
authorLuca Dariz <luca@orpolo.org>2023-06-17 23:17:54 +0200
committerSamuel Thibault <samuel.thibault@ens-lyon.org>2023-06-17 23:18:28 +0200
commit5e597575b78af2464117437c8bc41e632d7e112d (patch)
tree716c79533870e6e3ca0421d2f00e9a9b2f2944d5 /x86_64
parent54d025d426d0442a0f33c98272dee167acdb8613 (diff)
x86_64: add a critical section on entry and exit from syscall/sysret
When entering a syscall we're still using the user stack, so we can't reliably handle exceptions or interrupts, otherwise a user thread can easily crash the machine with an invalid stack. Instead, disable interrupts and (hopefullly) avoid traps in the fragments where we need to have the user stack in RSP. * i386/i386/ldt.c: mask interrupts and IOPL on syscall entry * x86_64/locore.S: keep interrupts disabled when we use the user stack
Diffstat (limited to 'x86_64')
-rw-r--r--x86_64/locore.S4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/x86_64/locore.S b/x86_64/locore.S
index 4d61d618..2938e430 100644
--- a/x86_64/locore.S
+++ b/x86_64/locore.S
@@ -1390,9 +1390,10 @@ ENTRY(syscall64)
mov %r11,%rbx /* prepare for error handling */
mov %r10,%rcx /* fix arg3 location according to C ABI */
- /* switch to kernel stack */
+ /* switch to kernel stack, then we can enable interrupts */
CPU_NUMBER(%r11)
movq CX(EXT(kernel_stack),%r11),%rsp
+ sti
/* Now we have saved state and args 1-6 are in place.
* Before invoking the syscall we do some bound checking and,
@@ -1453,6 +1454,7 @@ _syscall64_check_for_ast:
_syscall64_restore_state:
/* Restore thread state and return to user using sysret. */
+ cli /* block interrupts when using the user stack in kernel space */
movq CX(EXT(active_threads),%r11),%r11 /* point to current thread */
movq TH_PCB(%r11),%r11 /* point to pcb */
addq $ PCB_ISS,%r11 /* point to saved state */