x86_64/xen_locore.S
/*
 *  Copyright (C) 2006-2009 Free Software Foundation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <mach/machine/asm.h>

#include <i386/i386/i386asm.h>
#include <i386/i386/cpu_number.h>
#include <i386/i386/xen.h>

	.data	2
int_active:		/* nonzero while an event callback is being handled;
			 * lets hyp_sti avoid re-entering the pending scan */
	.long	0


	.text
	.globl	hyp_callback, hyp_failsafe_callback
	P2ALIGN(TEXT_ALIGN)
hyp_callback:
	popq	%rcx			/* restore %rcx and %r11, which Xen */
	popq	%r11			/* saves on the stack on event delivery */
	pushq	%rax			/* save %rax before taking the common interrupt path */
	jmp	EXT(all_intrs)
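
/*
 * For reference, the stack on entry from Xen (64-bit PV event delivery),
 * top of stack first, as the two pops above suggest:
 *
 *	%rcx		(saved by Xen, which clobbers it on its return path)
 *	%r11		(likewise)
 *	%rip
 *	%cs
 *	%rflags
 *	%rsp
 *	%ss
 *
 * This is the usual frame for 64-bit PV kernel entries; hyp_callback just
 * puts %rcx/%r11 back before handing over to the normal interrupt code.
 */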

ENTRY(interrupt)
	incl	int_active		/* currently handling interrupts */
	call	EXT(hyp_c_callback)	/* call generic interrupt routine */
	decl	int_active		/* stopped handling interrupts */
	sti
	ret

/* FIXME: if we are _very_ unlucky, we may be re-interrupted here, eventually
 * overflowing the stack.
 *
 * Fixing this properly is far from trivial, see mini-os. That said, maybe we
 * could just, before popping everything (which is _not_ destructive), save
 * %rsp into a known place and jump back through it?
 *
 * Mmm, there is an iret hypercall that does exactly what we want: perform
 * the iret and, if IF is set in the saved flags, clear the event mask (see
 * hyp_iretq and __hyp_iret below).
 */

/* Pfff, we have to check for pending interrupts ourselves.  Some other DomUs
 * just make a hypercall to retrigger the IRQ; it is not clear that that would
 * be any easier or faster. */
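
/*
 * A rough C equivalent of hyp_sti below, assuming CPU_CLI and CPU_PENDING
 * are the offsets of this vcpu's evtchn_upcall_mask and evtchn_upcall_pending
 * bytes in the shared info page (which is what their use here suggests):
 *
 *	void hyp_sti(void)
 *	{
 *	again:
 *		vcpu->evtchn_upcall_mask = 0;		// enable event delivery
 *		if (int_active)				// callback already running,
 *			return;				// it will rescan for us
 *		if (!vcpu->evtchn_upcall_pending)	// nothing arrived while
 *			return;				// events were masked
 *		vcpu->evtchn_upcall_mask = 0xff;	// mask again while handling
 *		do {
 *			int_active++;
 *			hyp_c_callback(NULL, NULL);	// dispatch pending events
 *			int_active--;
 *		} while (vcpu->evtchn_upcall_pending);
 *		goto again;				// unmask and recheck
 *	}
 */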
ENTRY(hyp_sti)
	pushq	%rbp
	movq	%rsp, %rbp
_hyp_sti:
	movb	$0,hyp_shared_info+CPU_CLI /* Enable interrupts */
	cmpl	$0,int_active		/* Check whether we were already checking pending interrupts */
	jz	0f
	popq	%rbp
	ret				/* Already active, just return */
0:
	/* Not active, check pending interrupts by hand */
	/* no memory barrier needed on x86 */
	cmpb	$0,hyp_shared_info+CPU_PENDING
	jne	0f
	popq	%rbp
	ret
0:
	movb	$0xff,hyp_shared_info+CPU_CLI	/* Mask interrupts again while we handle them */
1:
	pushq	%rax			/* save caller-saved registers around the C call */
	pushq	%rcx
	pushq	%rdx
	pushq	%rdi
	pushq	%rsi
	pushq	%r8
	pushq	%r9
	pushq	%r10
	pushq	%r11
	incl	int_active		/* currently handling interrupts */

	xorq	%rdi,%rdi		/* hyp_c_callback(NULL, NULL) */
	xorq	%rsi,%rsi
	call	EXT(hyp_c_callback)

	popq	%r11
	popq	%r10
	popq	%r9
	popq	%r8
	popq	%rsi
	popq	%rdi
	popq	%rdx
	popq	%rcx
	popq	%rax
	decl	int_active		/* stopped handling interrupts */
	cmpb	$0,hyp_shared_info+CPU_PENDING
	jne	1b
	jmp	_hyp_sti		/* unmask again and recheck for pending events */
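
/*
 * Design note: events stay masked (CPU_CLI == 0xff) for the whole handler
 * loop above, mirroring a real CPU running an interrupt handler with IF
 * cleared.  The final jump back to _hyp_sti reopens the window and rescans
 * CPU_PENDING, so an event arriving between the last check and the unmask
 * cannot be lost.
 */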

/* The hypervisor failed to reload one of our segment registers.  Dump them.  */
hyp_failsafe_callback:
	ud2				/* crash immediately; the recovery code below is currently unreached */
#if 1
/* TODO: FIXME */
	/* load sane segments */
	mov	%ss, %ax
#if 0
	mov	%ax, %ds
	mov	%ax, %es
#endif
	mov	%ax, %fs
	mov	%ax, %gs
	movq	%rsp, %rdi		/* pass the trap frame to the C handler */
	call	EXT(hyp_failsafe_c_callback)
#else
	/* Alternative (disabled): restore the selectors the hypervisor pushed
	 * on the failsafe frame, then retry the iret. */
	popq	%rdx
	movq	%rdx,%ds
	popq	%rdx
	movq	%rdx,%es
	popq	%fs
	popq	%gs

	movq	(%rsp),%rax
	ud2
	iretq
#endif

#undef iretq	/* presumably redefined elsewhere to route through hyp_iretq;
		 * we need the real instruction below */
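/*
 * For reference, the iretq frame this code indexes into, top of stack first
 * (standard x86_64 layout, 8 bytes per slot):
 *
 *	0*8(%rsp)	%rip
 *	1*8(%rsp)	%cs	<- RPL bits tested/forced below
 *	2*8(%rsp)	%rflags
 *	3*8(%rsp)	%rsp
 *	4*8(%rsp)	%ss	<- RPL bits forced below
 */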
ENTRY(hyp_iretq)
	testb	$2,1*8(%rsp)		/* RPL 2/3 in the saved %cs, i.e. returning to user mode? */
	jnz	slow
	/* Returning to the kernel.  There is no ring 1 on x86_64: the PV
	 * kernel actually runs in ring 3, so force the RPL of the saved
	 * %cs and %ss to 3 before the direct iretq. */
	orb	$3,1*8(%rsp)
	orb	$3,4*8(%rsp)
	iretq

slow:
	/* There is no ring 1/2 on x86_64, so returning to user mode has to go
	 * through the hypervisor. */
	pushq	$0
	jmp	__hyp_iret
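
/*
 * Hedged note on the slow path: Xen's iret hypercall consumes a
 * struct iret_context from the guest stack (per xen's public
 * arch-x86/xen-x86_64.h: rax, r11, rcx, flags, rip, cs, rflags, rsp, ss,
 * from the stack pointer up).  The pushq $0 above presumably supplies the
 * `flags' slot, with __hyp_iret expected to push rcx, r11 and rax on top
 * before issuing the hypercall.
 */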