path: root/x86_64/cswitch.S
/* 
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 * 
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 * 
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 * 
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

#include <mach/machine/asm.h>

#include <i386/i386/proc_reg.h>
#include <i386/i386/i386asm.h>
#include <i386/i386/cpu_number.h>

/*
 * Context switch routines for x86_64.
 */
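
/*
 * The KSS_* offsets used below, applied to the kernel stack base, reach
 * the per-thread kernel state save area reserved at the high end of
 * each kernel stack -- which is why the usable stack top computed below
 * stops IKS_SIZE+IEL_SIZE short of KERNEL_STACK_SIZE.  The offsets are
 * generated from the kernel state structure (struct i386_kernel_state);
 * as a sketch, the saved context on x86_64 amounts to the callee-saved
 * registers (%rbx, %rbp, %r12-%r15), the kernel stack pointer and the
 * instruction address to resume at.  Only callee-saved state has to be
 * preserved, since a context switch always happens at a C function call
 * boundary.
 */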

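/*
 * void Load_context(thread_t new)
 *
 * resumes 'new' from the state saved on its kernel stack without
 * saving any previous context.  There is no old thread to hand back,
 * so the resumed code sees a zero "old thread" value.  Does not
 * return to the caller.
 */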
ENTRY(Load_context)
	movq	S_ARG0,%rcx			/* get thread */
	movq	TH_KERNEL_STACK(%rcx),%rcx	/* get kernel stack */
	lea	KERNEL_STACK_SIZE-IKS_SIZE-IEL_SIZE(%rcx),%rdx
						/* point to stack top */
	CPU_NUMBER(%eax)
	movq	%rcx,CX(EXT(active_stacks),%eax)	/* store stack address */
	movq	%rdx,CX(EXT(kernel_stack),%eax)	/* store stack top */

/* XXX complete */

	movq	KSS_ESP(%rcx),%rsp		/* switch stacks */
	movq	KSS_EBP(%rcx),%rbp		/* restore registers */
	movq	KSS_EBX(%rcx),%rbx
	movq	KSS_R12(%rcx),%r12
	movq	KSS_R13(%rcx),%r13
	movq	KSS_R14(%rcx),%r14
	movq	KSS_R15(%rcx),%r15
	xorq	%rax,%rax			/* return zero (no old thread) */
	jmp	*KSS_EIP(%rcx)			/* resume thread */

/*
 *	This really only has to save registers
 *	when there is no explicit continuation.
 */
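/*
 * Switch_context(old, continuation, new) saves the callee-saved context
 * of the old thread on its kernel stack, records the continuation in
 * the old thread, and resumes the new thread from its saved context.
 * Execution "returns" on the new thread's stack with the old thread as
 * the return value.  When a continuation is supplied, the old thread
 * will later be restarted at that continuation rather than back here,
 * which is why the full register save is only strictly needed in the
 * no-continuation case.
 */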

ENTRY(Switch_context)
	CPU_NUMBER(%eax)
	movq	CX(EXT(active_stacks),%eax),%rcx	/* get old kernel stack */

	movq	%r12,KSS_R12(%rcx)		/* save registers */
	movq	%r13,KSS_R13(%rcx)
	movq	%r14,KSS_R14(%rcx)
	movq	%r15,KSS_R15(%rcx)
	movq	%rbx,KSS_EBX(%rcx)
	movq	%rbp,KSS_EBP(%rcx)
	popq	KSS_EIP(%rcx)			/* save return PC */
	movq	%rsp,KSS_ESP(%rcx)		/* save SP */

	movq	S_ARG0,%rax			/* get old thread */
	movq	%rcx,TH_KERNEL_STACK(%rax)	/* save old stack */
	movq	S_ARG1,%rbx			/* get continuation */
	movq	%rbx,TH_SWAP_FUNC(%rax)		/* save continuation */

	movq	S_ARG2,%rsi			/* get new thread */

	movq	TH_KERNEL_STACK(%rsi),%rcx	/* get its kernel stack */
	lea     KERNEL_STACK_SIZE-IKS_SIZE-IEL_SIZE(%rcx),%rbx
	        				/* point to stack top */

	CPU_NUMBER(%eax)
	movq	%rsi,CX(EXT(active_threads),%eax)	/* new thread is active */
	movq	%rcx,CX(EXT(active_stacks),%eax)	/* set current stack */
	movq	%rbx,CX(EXT(kernel_stack),%eax)	/* set stack top */

	movq	KSS_ESP(%rcx),%rsp		/* switch stacks */
	movq	KSS_EBP(%rcx),%rbp		/* restore registers */
	movq	KSS_EBX(%rcx),%rbx
	movq	KSS_R12(%rcx),%r12
	movq	KSS_R13(%rcx),%r13
	movq	KSS_R14(%rcx),%r14
	movq	KSS_R15(%rcx),%r15
	jmp	*KSS_EIP(%rcx)			/* return old thread */

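/*
 * Thread_continue is where a thread resumed on a freshly hand-built
 * kernel stack starts running: the stack is set up, when it is attached
 * to the thread, so that the saved instruction pointer is
 * Thread_continue and the saved %rbx holds the thread's continuation,
 * while %rax carries the old thread handed over by
 * Switch_context/Load_context.
 */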
ENTRY(Thread_continue)
	movq	%rax,%rdi			/* pass the thread argument */
	xorq	%rbp,%rbp			/* zero frame pointer */
	call	*%rbx				/* call real continuation */

#if	NCPUS > 1
/*
 * void switch_to_shutdown_context(thread_t thread,
 *				   void (*routine)(processor_t),
 *				   processor_t processor)
 *
 * saves the kernel context of the thread,
 * switches to the interrupt stack,
 * continues the thread (with thread_continue),
 * then runs routine on the interrupt stack.
 *
 * Assumes that the thread is a kernel thread (thus
 * has no FPU state)
 */
ENTRY(switch_to_shutdown_context)
	ud2					/* XXX not yet adapted for x86_64 -- trap if ever reached */
	CPU_NUMBER(%eax)
	movq	EXT(active_stacks)(,%eax,8),%rcx	/* get old kernel stack */
	movq	%r12,KSS_R12(%rcx)		/* save registers */
	movq	%r13,KSS_R13(%rcx)
	movq	%r14,KSS_R14(%rcx)
	movq	%r15,KSS_R15(%rcx)
	movq	%rbx,KSS_EBX(%rcx)
	movq	%rbp,KSS_EBP(%rcx)
	popq	KSS_EIP(%rcx)   		/* save return PC */
	movq	%rsp,KSS_ESP(%rcx)		/* save SP */

	movq	S_ARG0,%rax			/* get old thread */
	movq	%rcx,TH_KERNEL_STACK(%rax)	/* save old stack */
	movq	$0,TH_SWAP_FUNC(%rax)		/* clear continuation */
	movq	S_ARG1,%rbx			/* get routine to run next */
	movq	S_ARG2,%rsi			/* get its argument */

	CPU_NUMBER(%eax)
	movq	EXT(interrupt_stack)(,%eax,8),%rcx	/* point to its interrupt stack */
	lea	INTSTACK_SIZE(%rcx),%rsp	/* switch to it (top) */

	movq	%rax,%rdi			/* pass thread */
	call	EXT(thread_dispatch)		/* reschedule thread */

	movq	%rsi,%rdi			/* pass argument */
	call	*%rbx				/* call routine to run */
	hlt					/* (should never return) */

#endif	/* NCPUS > 1 */