summaryrefslogtreecommitdiff
path: root/x86_64
diff options
context:
space:
mode:
authorSamuel Thibault <samuel.thibault@ens-lyon.org>2023-08-12 01:17:01 +0200
committerSamuel Thibault <samuel.thibault@ens-lyon.org>2023-08-12 01:17:01 +0200
commit5897901d6ee40427f76997c82c7fd64789a6ef7f (patch)
treef941707ec32a49d429ed2350fb78fccf07616238 /x86_64
parent1cf67399a9bfa13ab79974e345eb87fa70753da1 (diff)
x86_64: fix NCPUS > 1 build of CX() macro
With the kernel now placed at -2GB, base+index addressing needs to use a 64-bit register for the index.
Diffstat (limited to 'x86_64')
-rw-r--r--x86_64/cswitch.S18
-rw-r--r--x86_64/locore.S78
-rw-r--r--x86_64/spl.S20
3 files changed, 58 insertions, 58 deletions
diff --git a/x86_64/cswitch.S b/x86_64/cswitch.S
index 015e884c..1f2e8e9d 100644
--- a/x86_64/cswitch.S
+++ b/x86_64/cswitch.S
@@ -40,8 +40,8 @@ ENTRY(Load_context)
lea KERNEL_STACK_SIZE-IKS_SIZE-IEL_SIZE(%rcx),%rdx
/* point to stack top */
CPU_NUMBER(%eax)
- movq %rcx,CX(EXT(active_stacks),%eax) /* store stack address */
- movq %rdx,CX(EXT(kernel_stack),%eax) /* store stack top */
+ movq %rcx,CX(EXT(active_stacks),%rax) /* store stack address */
+ movq %rdx,CX(EXT(kernel_stack),%rax) /* store stack top */
/* XXX complete */
@@ -62,7 +62,7 @@ ENTRY(Load_context)
ENTRY(Switch_context)
CPU_NUMBER(%eax)
- movq CX(EXT(active_stacks),%eax),%rcx /* get old kernel stack */
+ movq CX(EXT(active_stacks),%rax),%rcx /* get old kernel stack */
movq %r12,KSS_R12(%rcx) /* save registers */
movq %r13,KSS_R13(%rcx)
@@ -85,9 +85,9 @@ ENTRY(Switch_context)
/* point to stack top */
CPU_NUMBER(%eax)
- movq %rsi,CX(EXT(active_threads),%eax) /* new thread is active */
- movq %rcx,CX(EXT(active_stacks),%eax) /* set current stack */
- movq %rbx,CX(EXT(kernel_stack),%eax) /* set stack top */
+ movq %rsi,CX(EXT(active_threads),%rax) /* new thread is active */
+ movq %rcx,CX(EXT(active_stacks),%rax) /* set current stack */
+ movq %rbx,CX(EXT(kernel_stack),%rax) /* set stack top */
movq KSS_ESP(%rcx),%rsp /* switch stacks */
movq KSS_EBP(%rcx),%rbp /* restore registers */
@@ -120,7 +120,7 @@ ENTRY(Thread_continue)
ENTRY(switch_to_shutdown_context)
ud2
CPU_NUMBER(%eax)
- movq EXT(active_stacks)(,%eax,8),%rcx /* get old kernel stack */
+ movq CX(EXT(active_stacks),%rax),%rcx /* get old kernel stack */
movq %r12,KSS_R12(%rcx) /* save registers */
movq %r13,KSS_R13(%rcx)
movq %r14,KSS_R14(%rcx)
@@ -136,8 +136,8 @@ ud2
movq S_ARG1,%rbx /* get routine to run next */
movq S_ARG2,%rsi /* get its argument */
- CPU_NUMBER(%eax)
- movq CX(EXT(int_stack_base),%eax),%rcx /* point to its interrupt stack */
+ CPU_NUMBER(%ecx)
+ movq CX(EXT(int_stack_base),%rcx),%rcx /* point to its interrupt stack */
lea INTSTACK_SIZE(%rcx),%rsp /* switch to it (top) */
movq %rax,%rdi /* push thread */
diff --git a/x86_64/locore.S b/x86_64/locore.S
index 9bafaac5..070644bd 100644
--- a/x86_64/locore.S
+++ b/x86_64/locore.S
@@ -228,16 +228,16 @@ LEXT(retry_table_end) ;\
pushf /* Save flags */ ;\
cli /* block interrupts */ ;\
movl VA_ETC,%ebx /* get timer value */ ;\
- movl CX(EXT(current_tstamp),%edx),%ecx /* get old time stamp */;\
- movl %ebx,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\
+ movl CX(EXT(current_tstamp),%rdx),%ecx /* get old time stamp */;\
+ movl %ebx,CX(EXT(current_tstamp),%rdx) /* set new time stamp */;\
subl %ecx,%ebx /* elapsed = new-old */ ;\
- movl CX(EXT(current_timer),%edx),%ecx /* get current timer */ ;\
+ movl CX(EXT(current_timer),%rdx),%ecx /* get current timer */ ;\
addl %ebx,LOW_BITS(%ecx) /* add to low bits */ ;\
jns 0f /* if overflow, */ ;\
call timer_normalize /* normalize timer */ ;\
0: addl $(TH_SYSTEM_TIMER-TH_USER_TIMER),%ecx ;\
/* switch to sys timer */;\
- movl %ecx,CX(EXT(current_timer),%edx) /* make it current */ ;\
+ movl %ecx,CX(EXT(current_timer),%rdx) /* make it current */ ;\
popf /* allow interrupts */
/*
@@ -251,10 +251,10 @@ LEXT(retry_table_end) ;\
pushf /* Save flags */ ;\
cli /* block interrupts */ ;\
movl VA_ETC,%ebx /* get timer value */ ;\
- movl CX(EXT(current_tstamp),%edx),%ecx /* get old time stamp */;\
- movl %ebx,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\
+ movl CX(EXT(current_tstamp),%rdx),%ecx /* get old time stamp */;\
+ movl %ebx,CX(EXT(current_tstamp),%rdx) /* set new time stamp */;\
subl %ecx,%ebx /* elapsed = new-old */ ;\
- movl CX(EXT(current_timer),%edx),%ecx /* get current timer */ ;\
+ movl CX(EXT(current_timer),%rdx),%ecx /* get current timer */ ;\
addl %ebx,LOW_BITS(%ecx) /* add to low bits */ ;\
jns 0f /* if overflow, */ ;\
pushq %rax /* save %rax */ ;\
@@ -262,7 +262,7 @@ LEXT(retry_table_end) ;\
popq %rax /* restore %rax */ ;\
0: addl $(TH_SYSTEM_TIMER-TH_USER_TIMER),%ecx ;\
/* switch to sys timer */;\
- movl %ecx,CX(EXT(current_timer),%edx) /* make it current */ ;\
+ movl %ecx,CX(EXT(current_timer),%rdx) /* make it current */ ;\
popf /* allow interrupts */
/*
@@ -274,16 +274,16 @@ LEXT(retry_table_end) ;\
#define TIME_TRAP_UEXIT \
cli /* block interrupts */ ;\
movl VA_ETC,%ebx /* get timer */ ;\
- movl CX(EXT(current_tstamp),%edx),%ecx /* get old time stamp */;\
- movl %ebx,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\
+ movl CX(EXT(current_tstamp),%rdx),%ecx /* get old time stamp */;\
+ movl %ebx,CX(EXT(current_tstamp),%rdx) /* set new time stamp */;\
subl %ecx,%ebx /* elapsed = new-old */ ;\
- movl CX(EXT(current_timer),%edx),%ecx /* get current timer */ ;\
+ movl CX(EXT(current_timer),%rdx),%ecx /* get current timer */ ;\
addl %ebx,LOW_BITS(%ecx) /* add to low bits */ ;\
jns 0f /* if overflow, */ ;\
call timer_normalize /* normalize timer */ ;\
0: addl $(TH_USER_TIMER-TH_SYSTEM_TIMER),%ecx ;\
/* switch to user timer */;\
- movl %ecx,CX(EXT(current_timer),%edx) /* make it current */
+ movl %ecx,CX(EXT(current_timer),%rdx) /* make it current */
/*
* update time on interrupt entry.
@@ -294,14 +294,14 @@ LEXT(retry_table_end) ;\
*/
#define TIME_INT_ENTRY \
movl VA_ETC,%ecx /* get timer */ ;\
- movl CX(EXT(current_tstamp),%edx),%ebx /* get old time stamp */;\
- movl %ecx,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\
+ movl CX(EXT(current_tstamp),%rdx),%ebx /* get old time stamp */;\
+ movl %ecx,CX(EXT(current_tstamp),%rdx) /* set new time stamp */;\
subl %ebx,%ecx /* elapsed = new-old */ ;\
- movl CX(EXT(current_timer),%edx),%ebx /* get current timer */ ;\
+ movl CX(EXT(current_timer),%rdx),%ebx /* get current timer */ ;\
addl %ecx,LOW_BITS(%ebx) /* add to low bits */ ;\
- leal CX(0,%edx),%ecx /* timer is 16 bytes */ ;\
- lea CX(EXT(kernel_timer),%edx),%ecx /* get interrupt timer*/;\
- movl %ecx,CX(EXT(current_timer),%edx) /* set timer */
+ leal CX(0,%rdx),%ecx /* timer is 16 bytes */ ;\
+ lea CX(EXT(kernel_timer),%rdx),%ecx /* get interrupt timer*/;\
+ movl %ecx,CX(EXT(current_timer),%rdx) /* set timer */
/*
* update time on interrupt exit.
@@ -311,10 +311,10 @@ LEXT(retry_table_end) ;\
*/
#define TIME_INT_EXIT \
movl VA_ETC,%eax /* get timer */ ;\
- movl CX(EXT(current_tstamp),%edx),%ecx /* get old time stamp */;\
- movl %eax,CX(EXT(current_tstamp),%edx) /* set new time stamp */;\
+ movl CX(EXT(current_tstamp),%rdx),%ecx /* get old time stamp */;\
+ movl %eax,CX(EXT(current_tstamp),%rdx) /* set new time stamp */;\
subl %ecx,%eax /* elapsed = new-old */ ;\
- movl CX(EXT(current_timer),%edx),%ecx /* get current timer */ ;\
+ movl CX(EXT(current_timer),%rdx),%ecx /* get current timer */ ;\
addl %eax,LOW_BITS(%ecx) /* add to low bits */ ;\
jns 0f /* if overflow, */ ;\
call timer_normalize /* normalize timer */ ;\
@@ -322,7 +322,7 @@ LEXT(retry_table_end) ;\
jz 0f /* if overflow, */ ;\
movl %ebx,%ecx /* get old timer */ ;\
call timer_normalize /* normalize timer */ ;\
-0: movl %ebx,CX(EXT(current_timer),%edx) /* set timer */
+0: movl %ebx,CX(EXT(current_timer),%rdx) /* set timer */
/*
@@ -351,16 +351,16 @@ timer_normalize:
ENTRY(timer_switch)
CPU_NUMBER(%edx) /* get this CPU */
movl VA_ETC,%ecx /* get timer */
- movl CX(EXT(current_tstamp),%edx),%eax /* get old time stamp */
- movl %ecx,CX(EXT(current_tstamp),%edx) /* set new time stamp */
+ movl CX(EXT(current_tstamp),%rdx),%eax /* get old time stamp */
+ movl %ecx,CX(EXT(current_tstamp),%rdx) /* set new time stamp */
subl %ecx,%eax /* elapsed = new - old */
- movl CX(EXT(current_timer),%edx),%ecx /* get current timer */
+ movl CX(EXT(current_timer),%rdx),%ecx /* get current timer */
addl %eax,LOW_BITS(%ecx) /* add to low bits */
jns 0f /* if overflow, */
call timer_normalize /* normalize timer */
0:
movl S_ARG0,%ecx /* get new timer */
- movl %ecx,CX(EXT(current_timer),%edx) /* set timer */
+ movl %ecx,CX(EXT(current_timer),%rdx) /* set timer */
ret
/*
@@ -369,9 +369,9 @@ ENTRY(timer_switch)
ENTRY(start_timer)
CPU_NUMBER(%edx) /* get this CPU */
movl VA_ETC,%ecx /* get timer */
- movl %ecx,CX(EXT(current_tstamp),%edx) /* set initial time stamp */
+ movl %ecx,CX(EXT(current_tstamp),%rdx) /* set initial time stamp */
movl S_ARG0,%ecx /* get timer */
- movl %ecx,CX(EXT(current_timer),%edx) /* set initial timer */
+ movl %ecx,CX(EXT(current_timer),%rdx) /* set initial timer */
ret
#endif /* accurate timing */
@@ -596,7 +596,7 @@ trap_from_user:
CPU_NUMBER(%edx)
TIME_TRAP_UENTRY
- movq CX(EXT(kernel_stack),%edx),%rbx
+ movq CX(EXT(kernel_stack),%rdx),%rbx
xchgq %rbx,%rsp /* switch to kernel stack */
/* user regs pointer already set */
_take_trap:
@@ -618,10 +618,10 @@ _take_trap:
_return_from_trap:
CPU_NUMBER(%edx)
- cmpl $0,CX(EXT(need_ast),%edx)
+ cmpl $0,CX(EXT(need_ast),%rdx)
jz _return_to_user /* if we need an AST: */
- movq CX(EXT(kernel_stack),%edx),%rsp
+ movq CX(EXT(kernel_stack),%rdx),%rsp
/* switch to kernel stack */
call EXT(i386_astintr) /* take the AST */
popq %rsp /* switch back to PCB stack */
@@ -668,13 +668,13 @@ trap_from_kernel:
je 1f /* OK if so */
CPU_NUMBER(%edx) /* get CPU number */
- cmpq CX(EXT(kernel_stack),%edx),%rsp
+ cmpq CX(EXT(kernel_stack),%rdx),%rsp
/* already on kernel stack? */
ja 0f
- cmpq CX(EXT(active_stacks),%edx),%rsp
+ cmpq CX(EXT(active_stacks),%rdx),%rsp
ja 1f /* switch if not */
0:
- movq CX(EXT(kernel_stack),%edx),%rsp
+ movq CX(EXT(kernel_stack),%rdx),%rsp
1:
pushq %rbx /* save old stack */
movq %rbx,%rdi /* pass as parameter */
@@ -807,7 +807,7 @@ ENTRY(all_intrs)
CPU_NUMBER(%edx)
- movq CX(EXT(int_stack_top),%edx),%rcx
+ movq CX(EXT(int_stack_top),%rdx),%rcx
xchgq %rcx,%rsp /* switch to interrupt stack */
@@ -842,7 +842,7 @@ LEXT(return_to_iret) /* ( label for kdb_kintr and hardclock) */
testb $2,I_CS(%rsp) /* user mode, */
jz 1f /* check for ASTs */
0:
- cmpq $0,CX(EXT(need_ast),%edx)
+ cmpq $0,CX(EXT(need_ast),%rdx)
jnz ast_from_interrupt /* take it if so */
1:
POP_SEGMENTS_ISR(%rdx)
@@ -914,7 +914,7 @@ ast_from_interrupt:
CPU_NUMBER(%edx)
TIME_TRAP_UENTRY
- movq CX(EXT(kernel_stack),%edx),%rsp
+ movq CX(EXT(kernel_stack),%rdx),%rsp
/* switch to kernel stack */
call EXT(i386_astintr) /* take the AST */
popq %rsp /* back to PCB stack */
@@ -1164,7 +1164,7 @@ syscall_entry_2:
CPU_NUMBER(%edx)
TIME_TRAP_SENTRY
- movq CX(EXT(kernel_stack),%edx),%rbx
+ movq CX(EXT(kernel_stack),%rdx),%rbx
/* get current kernel stack */
xchgq %rbx,%rsp /* switch stacks - %ebx points to */
/* user registers. */
@@ -1174,7 +1174,7 @@ syscall_entry_2:
* Check for MACH or emulated system call
*/
syscall_entry_3:
- movq CX(EXT(active_threads),%edx),%rdx
+ movq CX(EXT(active_threads),%rdx),%rdx
/* point to current thread */
movq TH_TASK(%rdx),%rdx /* point to task */
movq TASK_EMUL(%rdx),%rdx /* get emulation vector */
diff --git a/x86_64/spl.S b/x86_64/spl.S
index e4f87d85..80c65c1e 100644
--- a/x86_64/spl.S
+++ b/x86_64/spl.S
@@ -48,7 +48,7 @@ lock orl $1,hyp_shared_info+CPU_PENDING_SEL; /* Yes, activate it */ \
ENTRY(spl0)
mb;
CPU_NUMBER(%edx)
- movl CX(EXT(curr_ipl),%edx),%eax /* save current ipl */
+ movl CX(EXT(curr_ipl),%rdx),%eax /* save current ipl */
pushq %rax
cli /* disable interrupts */
#ifdef LINUX_DEV
@@ -77,9 +77,9 @@ ENTRY(spl0)
cli /* disable interrupts */
1:
CPU_NUMBER(%edx)
- cmpl $(SPL0),CX(EXT(curr_ipl),%edx) /* are we at spl0? */
+ cmpl $(SPL0),CX(EXT(curr_ipl),%rdx) /* are we at spl0? */
je 1f /* yes, all done */
- movl $(SPL0),CX(EXT(curr_ipl),%edx) /* set ipl */
+ movl $(SPL0),CX(EXT(curr_ipl),%rdx) /* set ipl */
#ifdef MACH_XEN
movl EXT(int_mask)+SPL0*4,%eax
/* get xen mask */
@@ -124,7 +124,7 @@ ENTRY(spl7)
cli
CPU_NUMBER(%edx)
movl $SPL7,%eax
- xchgl CX(EXT(curr_ipl),%edx),%eax
+ xchgl CX(EXT(curr_ipl),%rdx),%eax
ret
ENTRY(splx)
@@ -132,7 +132,7 @@ ENTRY(splx)
CPU_NUMBER(%eax)
#if (MACH_KDB || MACH_TTD) && !defined(MACH_XEN)
/* First make sure that if we're exitting from ipl7, IF is still cleared */
- cmpl $SPL7,CX(EXT(curr_ipl),%eax) /* from ipl7? */
+ cmpl $SPL7,CX(EXT(curr_ipl),%rax) /* from ipl7? */
jne 0f
pushfq
popq %rax
@@ -145,7 +145,7 @@ ENTRY(splx)
testl %edx,%edx /* spl0? */
jz EXT(spl0) /* yes, handle specially */
CPU_NUMBER(%eax)
- cmpl CX(EXT(curr_ipl),%eax),%edx /* same ipl as current? */
+ cmpl CX(EXT(curr_ipl),%rax),%edx /* same ipl as current? */
jne spl /* no */
cmpl $SPL7,%edx /* spl7? */
je 1f /* to ipl7, don't enable interrupts */
@@ -194,9 +194,9 @@ splx_cli:
xorl %edx,%edx /* edx = ipl 0 */
2:
CPU_NUMBER(%eax)
- cmpl CX(EXT(curr_ipl),%eax),%edx /* same ipl as current? */
+ cmpl CX(EXT(curr_ipl),%rax),%edx /* same ipl as current? */
je 1f /* yes, all done */
- movl %edx,CX(EXT(curr_ipl),%eax) /* set ipl */
+ movl %edx,CX(EXT(curr_ipl),%rax) /* set ipl */
#ifdef MACH_XEN
movl EXT(int_mask),%eax
movl (%eax,%edx,4),%eax
@@ -216,7 +216,7 @@ spl:
CPU_NUMBER(%eax)
#if (MACH_KDB || MACH_TTD) && !defined(MACH_XEN)
/* First make sure that if we're exitting from ipl7, IF is still cleared */
- cmpl $SPL7,CX(EXT(curr_ipl),%eax) /* from ipl7? */
+ cmpl $SPL7,CX(EXT(curr_ipl),%rax) /* from ipl7? */
jne 0f
pushfq
popq %rax
@@ -235,7 +235,7 @@ spl:
#endif
cli /* disable interrupts */
CPU_NUMBER(%eax)
- xchgl CX(EXT(curr_ipl),%eax),%edx /* set ipl */
+ xchgl CX(EXT(curr_ipl),%rax),%edx /* set ipl */
#ifdef MACH_XEN
XEN_SETMASK() /* program PICs with new mask */
#endif