summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSamuel Thibault <samuel.thibault@ens-lyon.org>2023-10-28 13:05:33 +0200
committerSamuel Thibault <samuel.thibault@ens-lyon.org>2023-10-28 13:06:39 +0200
commit0ea8f34d6b2d37cb5027a9cd8d143b0d6b701613 (patch)
tree36b06a38574b15555466f08dcfa1deb95ca28df3
parentfc47cd4ec8314c3df45a6cedc2d633bd52bca01e (diff)
64bit: Fix locore build
To allow references to int_stack_base to be essentially unconstrained, we need to use 64-bit register indexing. CPU_NUMBER_NO_GS was missing a 64-bit variant, and CPU_NUMBER_NO_STACK assumes it is passed a 32-bit register.
-rw-r--r--i386/i386/cpu_number.h23
-rw-r--r--x86_64/locore.S10
2 files changed, 27 insertions, 6 deletions
diff --git a/i386/i386/cpu_number.h b/i386/i386/cpu_number.h
index 7ec3da5f..67c19e9b 100644
--- a/i386/i386/cpu_number.h
+++ b/i386/i386/cpu_number.h
@@ -47,6 +47,7 @@
shrl $24, reg ;\
movl %cs:CX(cpu_id_lut, reg), reg ;\
+#ifdef __i386__
/* Never call CPU_NUMBER_NO_GS(%esi) */
#define CPU_NUMBER_NO_GS(reg) \
pushl %esi ;\
@@ -63,7 +64,27 @@
popl %ebx ;\
popl %eax ;\
movl %esi, reg ;\
- popl %esi ;\
+ popl %esi
+#endif
+#ifdef __x86_64__
+/* Never call CPU_NUMBER_NO_GS(%esi) */
+#define CPU_NUMBER_NO_GS(reg) \
+ pushq %rsi ;\
+ pushq %rax ;\
+ pushq %rbx ;\
+ pushq %rcx ;\
+ pushq %rdx ;\
+ movl $1, %eax ;\
+ cpuid ;\
+ shrl $24, %ebx ;\
+ movl %cs:CX(cpu_id_lut, %ebx), %esi ;\
+ popq %rdx ;\
+ popq %rcx ;\
+ popq %rbx ;\
+ popq %rax ;\
+ movl %esi, reg ;\
+ popq %rsi
+#endif
#define CPU_NUMBER(reg) \
movl MY(CPU_ID), reg;
diff --git a/x86_64/locore.S b/x86_64/locore.S
index 2db0d49b..af3809ee 100644
--- a/x86_64/locore.S
+++ b/x86_64/locore.S
@@ -696,7 +696,7 @@ trap_from_kernel:
CPU_NUMBER(%ecx)
and $(~(INTSTACK_SIZE-1)),%rdx
- cmpq CX(EXT(int_stack_base),%ecx),%rdx
+ cmpq CX(EXT(int_stack_base),%rcx),%rdx
je 1f /* OK if so */
movl %ecx,%edx
@@ -828,7 +828,7 @@ ENTRY(all_intrs)
CPU_NUMBER_NO_GS(%ecx)
movq %rsp,%rdx /* on an interrupt stack? */
and $(~(INTSTACK_SIZE-1)),%rdx
- cmpq %ss:CX(EXT(int_stack_base),%ecx),%rdx
+ cmpq %ss:CX(EXT(int_stack_base),%rcx),%rdx
je int_from_intstack /* if not: */
SET_KERNEL_SEGMENTS(%rdx) /* switch to kernel segments */
@@ -888,7 +888,7 @@ LEXT(return_to_iret) /* to find the return from calling interrupt) */
int_from_intstack:
CPU_NUMBER_NO_GS(%edx)
- cmpq CX(EXT(int_stack_base),%edx),%rsp /* seemingly looping? */
+ cmpq CX(EXT(int_stack_base),%rdx),%rsp /* seemingly looping? */
jb stack_overflowed /* if not: */
call EXT(interrupt) /* call interrupt routine */
_return_to_iret_i: /* ( label for kdb_kintr) */
@@ -1408,7 +1408,7 @@ ENTRY(syscall64)
mov %r10,%rcx /* fix arg3 location according to C ABI */
/* switch to kernel stack, then we can enable interrupts */
- CPU_NUMBER_NO_STACK(%r11)
+ CPU_NUMBER_NO_STACK(%r11d)
movq CX(EXT(kernel_stack),%r11),%rsp
sti
@@ -1447,7 +1447,7 @@ _syscall64_call:
_syscall64_check_for_ast:
/* Check for ast. */
- CPU_NUMBER_NO_GS(%r11)
+ CPU_NUMBER_NO_GS(%r11d)
cmpl $0,CX(EXT(need_ast),%r11)
jz _syscall64_restore_state