author    Roland McGrath <roland@gnu.org>  2002-05-28 06:27:03 +0000
committer Thomas Schwinge <tschwinge@gnu.org>  2009-06-17 23:50:14 +0200
commit    2c4db5385f9f7770fad1ef50dc4e80429fba828f (patch)
tree      1fa9fce8c3465fa38c939ef3989ca73ddd04d99c
parent    e0d56ef075b7b5c3e10efcd2aacfdc2cebb4b278 (diff)
Alpha support files verbatim from CMU release MK83a.
-rw-r--r--  ChangeLog                                        56
-rw-r--r--  alpha/alpha/alpha_cache.S                        91
-rw-r--r--  alpha/alpha/alpha_copyin.S                      461
-rw-r--r--  alpha/alpha/alpha_cpu.S                         500
-rw-r--r--  alpha/alpha/alpha_cpu.h                         208
-rw-r--r--  alpha/alpha/alpha_init.c                        316
-rw-r--r--  alpha/alpha/alpha_instruction.c                 408
-rw-r--r--  alpha/alpha/alpha_lock.S                        310
-rw-r--r--  alpha/alpha/alpha_mem_ops.c                     285
-rw-r--r--  alpha/alpha/alpha_misc.c                        360
-rw-r--r--  alpha/alpha/alpha_scb.c                          88
-rw-r--r--  alpha/alpha/alpha_scb.h                          67
-rw-r--r--  alpha/alpha/alpha_startup.c                      66
-rw-r--r--  alpha/alpha/ast.h                                45
-rw-r--r--  alpha/alpha/ast_check.c                          69
-rw-r--r--  alpha/alpha/ast_types.h                          51
-rw-r--r--  alpha/alpha/autoconf.c                          317
-rw-r--r--  alpha/alpha/c_misc.c                            421
-rw-r--r--  alpha/alpha/clock.c                              84
-rw-r--r--  alpha/alpha/clock.h                              73
-rw-r--r--  alpha/alpha/context.S                           476
-rw-r--r--  alpha/alpha/context.h                            85
-rw-r--r--  alpha/alpha/cpu_number.h                         49
-rw-r--r--  alpha/alpha/frame.h                              66
-rw-r--r--  alpha/alpha/lock.h                              128
-rw-r--r--  alpha/alpha/locore.S                            755
-rw-r--r--  alpha/alpha/mach_param.h                         57
-rw-r--r--  alpha/alpha/machspl.h                            68
-rw-r--r--  alpha/alpha/parse_args.c                        475
-rw-r--r--  alpha/alpha/pcb.c                               893
-rw-r--r--  alpha/alpha/pmap.c                             2791
-rw-r--r--  alpha/alpha/pmap.h                              403
-rw-r--r--  alpha/alpha/prom_interface.S                    162
-rw-r--r--  alpha/alpha/prom_interface.h                    322
-rw-r--r--  alpha/alpha/prom_routines.S                     125
-rw-r--r--  alpha/alpha/prom_routines.h                      83
-rw-r--r--  alpha/alpha/setjmp.h                             48
-rw-r--r--  alpha/alpha/start.S                             950
-rw-r--r--  alpha/alpha/supage.S                            171
-rw-r--r--  alpha/alpha/thread.h                            223
-rw-r--r--  alpha/alpha/time_stamp.h                         44
-rw-r--r--  alpha/alpha/trap.c                              935
-rw-r--r--  alpha/alpha/trap.h                               72
-rw-r--r--  alpha/alpha/vm_tuning.h                          45
-rw-r--r--  alpha/dec/ln_copy.c                             288
-rw-r--r--  alpha/include/mach/alpha/alpha_instruction.h    690
-rw-r--r--  alpha/include/mach/alpha/asm.h                  539
-rw-r--r--  alpha/include/mach/alpha/boolean.h               50
-rw-r--r--  alpha/include/mach/alpha/exception.h             88
-rw-r--r--  alpha/include/mach/alpha/kern_return.h           57
-rw-r--r--  alpha/include/mach/alpha/machine_types.defs      78
-rw-r--r--  alpha/include/mach/alpha/syscall_sw.h           139
-rw-r--r--  alpha/include/mach/alpha/thread_status.h        156
-rw-r--r--  alpha/include/mach/alpha/vm_param.h             103
-rw-r--r--  alpha/include/mach/alpha/vm_types.h              98
55 files changed, 15988 insertions, 0 deletions
diff --git a/ChangeLog b/ChangeLog
index 3760741b..1993c710 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,5 +1,61 @@
2002-05-27 Roland McGrath <roland@frob.com>
+ Alpha support files verbatim from CMU release MK83a.
+ * alpha/include/mach/alpha/alpha_instruction.h: New file.
+ * alpha/include/mach/alpha/asm.h: New file.
+ * alpha/include/mach/alpha/boolean.h: New file.
+ * alpha/include/mach/alpha/exception.h: New file.
+ * alpha/include/mach/alpha/kern_return.h: New file.
+ * alpha/include/mach/alpha/machine_types.defs: New file.
+ * alpha/include/mach/alpha/syscall_sw.h: New file.
+ * alpha/include/mach/alpha/thread_status.h: New file.
+ * alpha/include/mach/alpha/vm_param.h: New file.
+ * alpha/include/mach/alpha/vm_types.h: New file.
+ * alpha/alpha/alpha_cache.S: New file.
+ * alpha/alpha/alpha_copyin.S: New file.
+ * alpha/alpha/alpha_cpu.S: New file.
+ * alpha/alpha/alpha_cpu.h: New file.
+ * alpha/alpha/alpha_init.c: New file.
+ * alpha/alpha/alpha_instruction.c: New file.
+ * alpha/alpha/alpha_lock.S: New file.
+ * alpha/alpha/alpha_mem_ops.c: New file.
+ * alpha/alpha/alpha_misc.c: New file.
+ * alpha/alpha/alpha_scb.c: New file.
+ * alpha/alpha/alpha_scb.h: New file.
+ * alpha/alpha/alpha_startup.c: New file.
+ * alpha/alpha/ast.h: New file.
+ * alpha/alpha/ast_check.c: New file.
+ * alpha/alpha/ast_types.h: New file.
+ * alpha/alpha/autoconf.c: New file.
+ * alpha/alpha/c_misc.c: New file.
+ * alpha/alpha/clock.c: New file.
+ * alpha/alpha/clock.h: New file.
+ * alpha/alpha/context.S: New file.
+ * alpha/alpha/context.h: New file.
+ * alpha/alpha/cpu_number.h: New file.
+ * alpha/alpha/frame.h: New file.
+ * alpha/alpha/lock.h: New file.
+ * alpha/alpha/locore.S: New file.
+ * alpha/alpha/mach_param.h: New file.
+ * alpha/alpha/machspl.h: New file.
+ * alpha/alpha/parse_args.c: New file.
+ * alpha/alpha/pcb.c: New file.
+ * alpha/alpha/pmap.c: New file.
+ * alpha/alpha/pmap.h: New file.
+ * alpha/alpha/prom_interface.S: New file.
+ * alpha/alpha/prom_interface.h: New file.
+ * alpha/alpha/prom_routines.S: New file.
+ * alpha/alpha/prom_routines.h: New file.
+ * alpha/alpha/setjmp.h: New file.
+ * alpha/alpha/start.S: New file.
+ * alpha/alpha/supage.S: New file.
+ * alpha/alpha/thread.h: New file.
+ * alpha/alpha/time_stamp.h: New file.
+ * alpha/alpha/trap.c: New file.
+ * alpha/alpha/trap.h: New file.
+ * alpha/alpha/vm_tuning.h: New file.
+ * alpha/dec/ln_copy.c: New file.
+
* configure.in (VERSION): Set to 1.90 for mainlining of OSKit-Mach.
* configure: Regenerated.
diff --git a/alpha/alpha/alpha_cache.S b/alpha/alpha/alpha_cache.S
new file mode 100644
index 00000000..8b904b26
--- /dev/null
+++ b/alpha/alpha/alpha_cache.S
@@ -0,0 +1,91 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: alpha_cache.s,v $
+ * Revision 2.3 93/01/19 08:56:59 danner
+ * Added Dcache flushing. Should have not been necessary.
+ * [93/01/19 af]
+ *
+ * Revision 2.2 93/01/14 17:10:52 danner
+ * Added reference to documentation source(s).
+ * [92/12/16 15:10:16 af]
+ *
+ * Created.
+ * [92/12/10 14:47:59 af]
+ *
+ */
+/*
+ * File: alpha_cache.s
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/92
+ *
+ * Alpha cache control operations.
+ *
+ * This code was derived exclusively from information available in
+ * "Alpha Architecture Reference Manual", Richard L. Sites ed.
+ * Digital Press, Burlington, MA 01803
+ * ISBN 1-55558-098-X, Order no. EY-L520E-DP
+ */
+
+#include <mach/alpha/asm.h>
+#include <mach/alpha/alpha_instruction.h>
+
+ .set noreorder
+
+/*
+ * Object:
+ * alphacache_Iflush EXPORTED function
+ *
+ * Flush (instruction) cache
+ *
+ * Arguments:
+ * none
+ *
+ *	Just call the PAL subroutine.
+ */
+LEAF(alphacache_Iflush,0)
+ call_pal op_imb
+ RET
+ END(alphacache_Iflush)
+
+/*
+ * Object:
+ * alphacache_Dflush EXPORTED function
+ *
+ * Flush (data) cache
+ *
+ * Arguments:
+ * phys_addr vm_offset_t
+ *
+ *	Turn the argument into a PFN and call the PAL subroutine.
+ */
+LEAF(alphacache_Dflush,0)
+ srl a0,13,a0
+ call_pal 1 /* cflush */
+ RET
+ END(alphacache_Dflush)
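
The srl by 13 in alphacache_Dflush turns the physical address into a page frame
number, which assumes the 8 KB (2^13-byte) page size of the early Alpha
implementations. A minimal C restatement of that conversion (illustration only;
the helper name is not part of the source):

    /* Physical address -> PFN before the cflush PAL call,
       assuming 8 KB (1 << 13) pages; same as "srl a0,13,a0". */
    unsigned long pa_to_pfn(unsigned long phys_addr)
    {
        return phys_addr >> 13;
    }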
+
diff --git a/alpha/alpha/alpha_copyin.S b/alpha/alpha/alpha_copyin.S
new file mode 100644
index 00000000..67ce51b8
--- /dev/null
+++ b/alpha/alpha/alpha_copyin.S
@@ -0,0 +1,461 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: alpha_copyin.s,v $
+ * Revision 2.4 93/05/20 21:01:52 mrt
+ * Changed use of zero to ra in call to NESTED.
+ * [93/05/18 mrt]
+ *
+ * Revision 2.3 93/01/19 08:57:31 danner
+ * Added fast aligned_block_copy.
+ * [93/01/19 af]
+ *
+ * Revision 2.2 93/01/14 17:10:58 danner
+ * Added reference to documentation source(s).
+ * [92/12/16 15:10:27 af]
+ *
+ * Created.
+ * [92/12/10 14:48:32 af]
+ */
+/*
+ * File: alpha_copyin.s
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/92
+ *
+ * Copy operations that require assembly coding
+ * because they use the thread recover technology.
+ *	Besides copyin/copyout, this includes kdb's bottom-level functions.
+ *
+ * This code was derived exclusively from information available in
+ * "Alpha Architecture Reference Manual", Richard L. Sites ed.
+ * Digital Press, Burlington, MA 01803
+ * ISBN 1-55558-098-X, Order no. EY-L520E-DP
+ */
+
+#include <cpus.h>
+#include <mach_kdb.h>
+
+#include <mach/alpha/asm.h>
+#include <mach/kern_return.h>
+#include <alpha/thread.h>
+#include <alpha/alpha_cpu.h>
+#include <mach/alpha/alpha_instruction.h>
+
+#include <assym.s>
+
+ .set noreorder
+
+/* BUGFIX: the assemblers do not seem to grok things like
+ lda $1 ,0x000003fe00000000
+ so we must put a bit too much knowledge in here about the KUSEG */
+
+#if __GNU_AS__
+#define load_KUSEG_END(reg) \
+ lda reg,0x3fe; \
+ sll reg,32,reg
+#else
+#define load_KUSEG_END(reg) \
+ lda reg,(KUSEG_END>>32); \
+ sll reg,32,reg
+#endif
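
Since the assembler cannot take the full 64-bit immediate, the macro builds
KUSEG_END in two instructions: load the high 10 bits, then shift them into
place. A quick C sanity check of that arithmetic (a sketch; the function name
is made up):

    #include <assert.h>
    /* "lda reg,0x3fe" followed by "sll reg,32,reg" yields KUSEG_END. */
    void check_kuseg_end(void)
    {
        unsigned long reg = 0x3fe;
        reg <<= 32;
        assert(reg == 0x000003fe00000000UL);
    }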
+
+/*
+ * Object:
+ * copyin EXPORTED function
+ *
+ * Copy bytes from user space to kernel space
+ *
+ * Arguments:
+ * from char *
+ * to char *
+ * size unsigned
+ *
+ * Use the thread-recover technology and just call bcopy.
+ */
+# define FRAMESIZE (4*8+16)
+NESTED(copyin, 3, FRAMESIZE, ra, 0, 0)
+ ldgp gp,0(pv)
+ lda sp,-FRAMESIZE(sp)
+ stq ra,FRAMESIZE-8(sp)
+ stq s0,FRAMESIZE-16(sp)
+#if (NCPUS>1)
+ call_pal op_mfpr_whami
+#else
+ mov zero,v0
+#endif
+ lda s0,active_threads
+ s8addq v0,s0,s0
+ ldq s0,0(s0)
+
+ load_KUSEG_END(t0)
+ cmpult a0,t0,t1
+ beq t1,copy_error /* sneaker */
+ addq a0,a2,v0
+ subq v0,1,v0
+ cmpult v0,t0,t1
+ beq t1,copy_error /* sneaker */
+
+ lda t1,copy_error
+ stq t1,THREAD_RECOVER(s0)
+ CALL(bcopy)
+
+ addq zero,KERN_SUCCESS,v0
+ br zero,copy_ok
+
+copy_error:
+ addq zero,1,v0
+copy_ok:
+ stq zero,THREAD_RECOVER(s0)
+ ldq ra,FRAMESIZE-8(sp)
+ ldq s0,FRAMESIZE-16(sp)
+ lda sp,FRAMESIZE(sp)
+ RET
+ END(copyin)
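
The sequence above is the thread-recover pattern: validate the user range
against KUSEG_END, point THREAD_RECOVER at a fixup label, run bcopy, and let
the trap handler redirect a faulting access to that label. A pseudo-C sketch of
the same control flow (the recover field name and types are assumed for
illustration, not taken from a real header):

    kern_return_t copyin_sketch(const char *from, char *to, unsigned size)
    {
        thread_t self = current_thread();
        /* the "sneaker" checks: both ends must lie below KUSEG_END */
        if ((unsigned long)from >= KUSEG_END ||
            (unsigned long)from + size - 1 >= KUSEG_END)
            return 1;
        self->recover = (vm_offset_t)&&fault;  /* trap handler jumps here */
        bcopy(from, to, size);
        self->recover = 0;
        return KERN_SUCCESS;
    fault:
        self->recover = 0;
        return 1;
    }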
+
+/*
+ * Object:
+ * copyout EXPORTED function
+ *
+ * Copy bytes from kernel space to user space
+ *
+ * Arguments:
+ * from char *
+ * to char *
+ * size unsigned
+ *
+ */
+NESTED(copyout, 3, FRAMESIZE, ra, 0, 0)
+ ldgp gp,0(pv)
+ lda sp,-FRAMESIZE(sp)
+ stq ra,FRAMESIZE-8(sp)
+ stq s0,FRAMESIZE-16(sp)
+#if (NCPUS>1)
+ call_pal op_mfpr_whami
+#else
+ mov zero,v0
+#endif
+ lda s0,active_threads
+ s8addq v0,s0,s0
+ ldq s0,0(s0)
+
+ load_KUSEG_END(t0)
+ cmpult a1,t0,t1
+ beq t1,copy_error /* sneaker */
+ addq a1,a2,v0
+ subq v0,1,v0
+ cmpult v0,t0,t1
+ beq t1,copy_error /* sneaker */
+
+ lda t1,copy_error
+ stq t1,THREAD_RECOVER(s0)
+ CALL(bcopy)
+
+ addq zero,KERN_SUCCESS,v0
+ br zero,copy_ok
+ END(copyout)
+
+
+/*
+ * Object:
+ * copyinmsg EXPORTED function
+ *
+ * Copy bytes from user space to kernel space.
+ * For message buffers (integral ints).
+ *
+ * Object:
+ * copyoutmsg EXPORTED function
+ *
+ * Copy bytes from kernel space to user space
+ * For message buffers (integral ints).
+ *
+ * Arguments:
+ * from char *
+ * to char *
+ * size unsigned
+ * Assumes size & 3 == 0 and size>>2 > 0.
+ *
+ * Doesn't use the thread-recover technology.
+ * The trap handler is responsible for fixing up faults,
+ * redirecting us to copymsg_error.
+ */
+
+EXPORT(copymsg_start)
+LEAF(copyinmsg,3)
+#if 0
+ blez a0,copymsg_error /* sneaker */
+ addq v0,a0,a2
+ blez v0,copymsg_error
+#endif
+ /*
+ * The write buffer on a pmax handles one store/six cycles.
+ * On a 3max, this loop might be worth unrolling.
+ * ON ALPHA I NEED TO MAKE A SECOND PASS OVER ALL THINGS
+ */
+
+1: ldl t0,0(a0)
+ addq a0,4,a0
+ stl t0,0(a1)
+ subq a2,4,a2
+ addq a1,4,a1
+ bne a2,1b
+
+ mov zero,v0
+ RET
+ END(copyinmsg)
+
+LEAF(copyoutmsg,3)
+#if 0
+ blez a1,copymsg_error /* sneaker */
+ addq v0,a1,a2
+ blez v0,copymsg_error
+#endif
+ /*
+ * The write buffer on a pmax handles one store/six cycles.
+ * On a 3max, this loop might be worth unrolling.
+ */
+
+1: ldl t0,0(a0)
+ addq a0,4,a0
+ stl t0,0(a1)
+ subq a2,4,a2
+ addq a1,4,a1
+ bne a2,1b
+
+ mov zero,v0
+ RET
+ END(copyoutmsg)
+EXPORT(copymsg_end)
+
+LEAF(copymsg_error,0)
+ addq zero,1,v0
+ RET
+ END(copymsg_error)
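
Both message copiers are the same 32-bit-word-at-a-time loop; the stated
preconditions (size & 3 == 0 and size > 0) are what let it run with no
residual-byte tail. The equivalent loop in C (a sketch, not part of the
source):

    void copymsg_words(const int *from, int *to, unsigned size)
    {
        do {
            *to++ = *from++;    /* one ldl/stl pair per iteration */
            size -= 4;
        } while (size != 0);
    }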
+
+/*
+ * Object:
+ * aligned_block_copy EXPORTED function
+ *
+ * Copy bytes from word-aligned location
+ * to word-aligned location.
+ *
+ * Arguments:
+ * from long *
+ * to long *
+ * size long
+ *
+ * Unrolled, hyperoptimized page-copy function.
+ * Addresses must be identically aligned, preferably
+ * on a cache line boundary.
+ * Count is a multiple of CHUNK_SIZE or else we overcopy.
+ *
+ * Performance issues:
+ * I wrote this to see how fast a page can be copied.
+ * Copying 8k from cache to cache runs at 293.6 Mb/sec.
+ */
+#define CACHE_LINE_SIZE 32
+#define CHUNK_SIZE (CACHE_LINE_SIZE*4)
+ .align 4
+
+ .set noreorder
+
+LEAF(aligned_block_copy,3)
+
+ /* fetch 4 cache lines */
+ ldq_u t2,0(a0)
+ ldq_u t3,(CACHE_LINE_SIZE)(a0)
+ ldq_u t4,(CACHE_LINE_SIZE*2)(a0)
+ ldq_u t5,(CACHE_LINE_SIZE*3)(a0)
+
+ /* fetch the rest of the first cache line */
+ ldq_u t6,8(a0)
+ ldq_u t7,16(a0)
+ ldq_u t8,24(a0)
+ /* add more inst if CACHE_LINE_SIZE changes */
+
+ /* adjust counter */
+ subq a2,CHUNK_SIZE,a2
+
+ /* fetch the rest of the second cache line */
+ ldq_u t9,(CACHE_LINE_SIZE+8)(a0)
+ ldq_u t10,(CACHE_LINE_SIZE+16)(a0)
+ ldq_u t11,(CACHE_LINE_SIZE+24)(a0)
+ /* add more ... */
+
+ /* Now for the stores, first cache line */
+ stq_u t2,0(a1)
+ stq_u t6,8(a1)
+ stq_u t7,16(a1)
+ stq_u t8,24(a1)
+
+ /* fetch third cache line */
+ ldq_u t6,((CACHE_LINE_SIZE*2)+8)(a0)
+ ldq_u t7,((CACHE_LINE_SIZE*2)+16)(a0)
+ ldq_u t8,((CACHE_LINE_SIZE*2)+24)(a0)
+
+ /* stores, second cache line */
+ stq_u t3,(CACHE_LINE_SIZE)(a1)
+ stq_u t9,(CACHE_LINE_SIZE+8)(a1)
+ stq_u t10,(CACHE_LINE_SIZE+16)(a1)
+ stq_u t11,(CACHE_LINE_SIZE+24)(a1)
+
+ /* fetch fourth cache line */
+ ldq_u t9,((CACHE_LINE_SIZE*3)+8)(a0)
+ ldq_u t10,((CACHE_LINE_SIZE*3)+16)(a0)
+ ldq_u t11,((CACHE_LINE_SIZE*3)+24)(a0)
+
+ /* stores, third cache line */
+ stq_u t4,(CACHE_LINE_SIZE*2)(a1)
+ stq_u t6,((CACHE_LINE_SIZE*2)+8)(a1)
+ stq_u t7,((CACHE_LINE_SIZE*2)+16)(a1)
+ stq_u t8,((CACHE_LINE_SIZE*2)+24)(a1)
+
+ /* last time round ? */
+ ble a2,finish_up
+ nop /* keep double issue */
+
+	/* Nope, do the last line, adjust pointers and repeat */
+ stq_u t5,(CACHE_LINE_SIZE*3)(a1)
+ stq_u t9,((CACHE_LINE_SIZE*3)+8)(a1)
+ stq_u t10,((CACHE_LINE_SIZE*3)+16)(a1)
+ stq_u t11,((CACHE_LINE_SIZE*3)+24)(a1)
+
+ addq a0,CHUNK_SIZE,a0
+ addq a1,CHUNK_SIZE,a1
+ br zero,bcopy
+ nop /* align */
+
+finish_up:
+ /* We must still do the stores of the fourth cache line */
+ stq_u t5,(CACHE_LINE_SIZE*3)(a1)
+ stq_u t9,((CACHE_LINE_SIZE*3)+8)(a1)
+ stq_u t10,((CACHE_LINE_SIZE*3)+16)(a1)
+ stq_u t11,((CACHE_LINE_SIZE*3)+24)(a1)
+
+ RET
+ END(aligned_block_copy)
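
The routine is software-pipelined: loads for the next cache lines are issued
while stores for the previous ones drain, one 128-byte chunk (four 32-byte
lines) per pass. A plain C rendering keeps the chunking but loses that
scheduling (sketch only):

    #define LINE  32
    #define CHUNK (LINE * 4)
    void aligned_block_copy_sketch(long *from, long *to, long size)
    {
        while (size > 0) {
            for (int i = 0; i < CHUNK / 8; i++)   /* 16 quadwords */
                to[i] = from[i];
            from += CHUNK / 8;
            to   += CHUNK / 8;
            size -= CHUNK;      /* may overcopy, as the header warns */
        }
    }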
+
+
+#if MACH_KDB
+/*
+ * Object:
+ * kdb_getiomem EXPORTED function
+ *
+ * Copy a word from kernel I/O space to memory
+ *
+ * Arguments:
+ * from char *
+ *
+ */
+LEAF(kdb_getiomem,1)
+#if 1
+call_pal 0x80
+#else
+ ldgp gp,0(pv)
+ lda v0,kdb_iomem_recover
+ stq v0,fast_thread_recover
+ andi t0,a0,3
+ bne t0,zero,g_by_shorts
+ andi t0,1
+ ldq v0,0(a0)
+ b kdb_iomem_ok
+ nop
+g_by_shorts:
+ bne t0,zero,g_by_bytes
+ nop
+ lhu v0,0(a0)
+ lhu t1,2(a0)
+ nop
+ sll t1,16
+ b kdb_iomem_ok
+ or v0,t1
+
+g_by_bytes:
+ lbu v0,0(a0)
+ lbu t1,1(a0)
+ lbu t2,2(a0)
+ lbu t3,3(a0)
+ sll t1,8
+ sll t2,16
+ sll t3,24
+ or v0,t1
+ or v0,t2
+ b kdb_iomem_ok
+ or v0,t3
+
+kdb_iomem_recover:
+ li v0,-1
+kdb_iomem_ok:
+ stq zero,fast_thread_recover
+ j ra
+ nop
+#endif
+ RET
+ END(kdb_getiomem)
+/*
+ * Object:
+ * kdb_putiomem EXPORTED function
+ *
+ * Copy a word from memory to kernel I/O space
+ *
+ * Arguments:
+ * to char *
+ * value unsigned
+ *
+ */
+LEAF(kdb_putiomem,2)
+#if 1
+call_pal 0x80
+#else
+ la v0,kdb_iomem_recover
+ stq v0,fast_thread_recover
+ andi t0,a0,3
+ bne t0,zero,byshorts
+ andi t0,1
+ stq a1,0(a0)
+ b kdb_iomem_ok
+ nop
+byshorts:
+ bne t0,zero,bybytes
+ srl t0,a1,16
+ sh a1,0(a0)
+ sh t0,2(a0)
+ b kdb_iomem_ok
+ nop
+
+bybytes:
+ sb a1,0(a0)
+ srl a1,8
+ sb a1,1(a0)
+ sb t0,2(a0)
+ srl t0,8
+ sb t0,3(a0)
+ b kdb_iomem_ok
+ nop
+#endif
+ RET
+ END(kdb_putiomem)
+#endif /* MACH_KDB */
diff --git a/alpha/alpha/alpha_cpu.S b/alpha/alpha/alpha_cpu.S
new file mode 100644
index 00000000..2be2a630
--- /dev/null
+++ b/alpha/alpha/alpha_cpu.S
@@ -0,0 +1,500 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: alpha_cpu.s,v $
+ * Revision 2.2 93/02/05 07:57:18 danner
+ * Change mov inst. to or
+ * [93/01/12 jeffreyh]
+ * Added ops on MCES.
+ * [93/01/15 af]
+ * Added reference to documentation source(s).
+ * [92/12/16 15:10:52 af]
+ *
+ * Created.
+ * [92/06/02 af]
+ *
+ */
+/*
+ * File: alpha_cpu.s
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/92
+ *
+ * CPU related miscellaneous operations
+ * Includes common operations on the status register (spl)
+ * and other CPU registers, and the FPA registers.
+ *
+ * This code was derived exclusively from information available in
+ * "Alpha Architecture Reference Manual", Richard L. Sites ed.
+ * Digital Press, Burlington, MA 01803
+ * ISBN 1-55558-098-X, Order no. EY-L520E-DP
+ */
+#include <cpus.h>
+
+
+#include <mach/alpha/asm.h>
+#include <alpha/alpha_cpu.h>
+#include <mach/alpha/vm_param.h>
+#include <mach/alpha/alpha_instruction.h>
+#include <alpha/thread.h>
+#include <mach/exception.h>
+
+#include <assym.s>
+
+ .set noreorder
+
+/*
+ * Object:
+ * wbflush EXPORTED function
+ *
+ * Wait for writes to complete
+ *
+ * Arguments:
+ * none
+ */
+LEAF(wbflush,0)
+ or s0,zero,t1
+ or s1,zero,t2
+ mb
+ or t1,zero,s0
+ or t2,zero,s1
+ RET
+ END(wbflush)
+
+
+/*
+ * Object:
+ * alpha_fpa_unload EXPORTED function
+ *
+ * Checkpoint FPA status.
+ *
+ * Arguments:
+ * state alpha_float_state *
+ *
+ * Assumes FPA *is* currently usable
+ */
+LEAF(alpha_fpa_unload,1)
+
+ trapb /* flush fpa pipes */
+
+ stt $f30,MFS_REGS+30*8(a0)
+ mf_fpcr $f30
+ stt $f29, MFS_REGS+29*8(a0)
+ stt $f28, MFS_REGS+28*8(a0)
+ stt $f27, MFS_REGS+27*8(a0)
+ stt $f30, MFS_CSR(a0) /* r31, that is */
+ stt $f26, MFS_REGS+26*8(a0)
+ stt $f25, MFS_REGS+25*8(a0)
+ stt $f24, MFS_REGS+24*8(a0)
+ stt $f23, MFS_REGS+23*8(a0)
+ stt $f22, MFS_REGS+22*8(a0)
+ stt $f21, MFS_REGS+21*8(a0)
+ stt $f20, MFS_REGS+20*8(a0)
+ stt $f19, MFS_REGS+19*8(a0)
+ stt $f18, MFS_REGS+18*8(a0)
+ stt $f17, MFS_REGS+17*8(a0)
+ stt $f16, MFS_REGS+16*8(a0)
+ stt $f15, MFS_REGS+15*8(a0)
+ stt $f14, MFS_REGS+14*8(a0)
+ stt $f13, MFS_REGS+13*8(a0)
+ stt $f12, MFS_REGS+12*8(a0)
+ stt $f11, MFS_REGS+11*8(a0)
+ stt $f10, MFS_REGS+10*8(a0)
+ stt $f9, MFS_REGS+ 9*8(a0)
+ stt $f8, MFS_REGS+ 8*8(a0)
+ stt $f7, MFS_REGS+ 7*8(a0)
+ stt $f6, MFS_REGS+ 6*8(a0)
+ stt $f5, MFS_REGS+ 5*8(a0)
+ stt $f4, MFS_REGS+ 4*8(a0)
+ stt $f3, MFS_REGS+ 3*8(a0)
+ stt $f2, MFS_REGS+ 2*8(a0)
+ stt $f1, MFS_REGS+ 1*8(a0)
+ stt $f0, MFS_REGS+ 0*8(a0)
+
+ /* disable fpa */
+ or zero,zero,a0
+ call_pal op_mtpr_fen
+
+ RET
+ END(alpha_fpa_unload)
+
+/*
+ * Object:
+ * alpha_fpa_loadup EXPORTED function
+ *
+ * Restore FPA status.
+ *
+ * Arguments:
+ * state alpha_float_state *
+ *
+ * Assumes FPA *is* currently usable
+ */
+LEAF(alpha_fpa_loadup,1)
+
+ /* enable fpa first */
+ or a0,zero,t4 /* safe from PAL code */
+ addq zero,1,a0
+ call_pal op_mtpr_fen /* enable fpa for the kernel */
+ or t4,zero,a0
+
+ ldt $f30, MFS_CSR(a0) /* r31, that is */
+ ldt $f29, MFS_REGS+29*8(a0)
+ ldt $f28, MFS_REGS+28*8(a0)
+ ldt $f27, MFS_REGS+27*8(a0)
+ mt_fpcr $f30
+ ldt $f30, MFS_REGS+30*8(a0)
+ ldt $f26, MFS_REGS+26*8(a0)
+ ldt $f25, MFS_REGS+25*8(a0)
+ ldt $f24, MFS_REGS+24*8(a0)
+ ldt $f23, MFS_REGS+23*8(a0)
+ ldt $f22, MFS_REGS+22*8(a0)
+ ldt $f21, MFS_REGS+21*8(a0)
+ ldt $f20, MFS_REGS+20*8(a0)
+ ldt $f19, MFS_REGS+19*8(a0)
+ ldt $f18, MFS_REGS+18*8(a0)
+ ldt $f17, MFS_REGS+17*8(a0)
+ ldt $f16, MFS_REGS+16*8(a0)
+ ldt $f15, MFS_REGS+15*8(a0)
+ ldt $f14, MFS_REGS+14*8(a0)
+ ldt $f13, MFS_REGS+13*8(a0)
+ ldt $f12, MFS_REGS+12*8(a0)
+ ldt $f11, MFS_REGS+11*8(a0)
+ ldt $f10, MFS_REGS+10*8(a0)
+ ldt $f9, MFS_REGS+ 9*8(a0)
+ ldt $f8, MFS_REGS+ 8*8(a0)
+ ldt $f7, MFS_REGS+ 7*8(a0)
+ ldt $f6, MFS_REGS+ 6*8(a0)
+ ldt $f5, MFS_REGS+ 5*8(a0)
+ ldt $f4, MFS_REGS+ 4*8(a0)
+ ldt $f3, MFS_REGS+ 3*8(a0)
+ ldt $f2, MFS_REGS+ 2*8(a0)
+ ldt $f1, MFS_REGS+ 1*8(a0)
+ ldt $f0, MFS_REGS+ 0*8(a0)
+
+ RET
+ END(alpha_fpa_loadup)
+
+/*
+ * Object:
+ * mtpr_fen EXPORTED function
+ *
+ * Brutally enable/disable fpa usage.
+ *
+ * Arguments:
+ * a0 boolean_t
+ */
+LEAF(mtpr_fen,1)
+	call_pal op_mtpr_fen		/* enable/disable fpa as requested */
+	RET
+	END(mtpr_fen)
+
+LEAF(mtpr_usp,1)
+ call_pal op_mtpr_usp
+ RET
+ END(mtpr_usp)
+LEAF(mfpr_usp,0)
+ call_pal op_mfpr_usp
+ RET
+	END(mfpr_usp)
+
+LEAF(mfpr_mces,0)
+ call_pal op_mfpr_mces
+ RET
+ END(mfpr_mces)
+LEAF(mtpr_mces,1)
+ call_pal op_mtpr_mces
+ RET
+ END(mtpr_mces)
+
+/*
+ * Object:
+ * alpha_swap_ipl EXPORTED function
+ *
+ * Change priority level, return current one
+ *
+ * Arguments:
+ * a0 unsigned
+ *
+ *	Set priority level to the value in a0; returns the previous
+ *	priority level. Straight call to PAL code.
+ */
+LEAF(alpha_swap_ipl,1)
+ call_pal op_mtpr_ipl
+ RET
+	END(alpha_swap_ipl)
+
+/*
+ * Object:
+ * kdbsplhigh EXPORTED function
+ *
+ * Block all interrupts
+ *
+ * Arguments:
+ * none
+ *
+ * Returns the previous content of the status register
+ * [Separate from above to allow sstepping]
+ *
+ * Object:
+ * kdbsplx EXPORTED function
+ *
+ * Restore priority level
+ *
+ * Arguments:
+ * a0 unsigned
+ *
+ */
+LEAF(kdbsplhigh,0)
+ addq zero,ALPHA_IPL_HIGH,a0
+XLEAF(kdbsplx,1)
+ call_pal op_mtpr_ipl
+ RET
+ END(kdbsplhigh)
+
+/*
+ * Object:
+ * setsoftclock EXPORTED function
+ *
+ * Schedule a software clock interrupt
+ *
+ * Arguments:
+ * none
+ *
+ * Software interrupts are generated by writing into the SIRR
+ * register. HW clears this bit.
+ * In Mach only one software interrupt is used.
+ */
+LEAF(setsoftclock,0)
+ addq zero,1,a0
+ call_pal op_mtpr_sirr
+ RET
+ END(setsoftclock)
+
+/*
+ * Object:
+ * cpu_number EXPORTED function
+ *
+ * Return current processor number
+ *
+ * Arguments:
+ * none
+ *
+ * Use the internal Who-Am-I register.
+ */
+LEAF(cpu_number,0)
+ call_pal op_mfpr_whami
+ RET
+ END(cpu_number)
+
+/*
+ * Object:
+ * interrupt_processor EXPORTED function
+ *
+ * Send an interrupt to a processor
+ *
+ * Arguments:
+ * procnum int
+ *
+ */
+LEAF(interrupt_processor,1)
+ call_pal op_mtpr_ipir
+ RET
+ END(interrupt_processor)
+
+/*
+ * Object:
+ * current_thread EXPORTED function
+ *
+ * Return current thread
+ *
+ * Arguments:
+ * none
+ *
+ * Use the internal processor-base register.
+ */
+#if 0
+LEAF(current_thread,0)
+ call_pal op_mfpr_prbr
+ RET
+ END(current_thread)
+#endif
+/*
+ * Object:
+ * set_current_thread EXPORTED function
+ *
+ * Set the current thread register
+ *
+ * Arguments:
+ * thread thread_t
+ *
+ * Use the internal processor-base register.
+ */
+LEAF(set_current_thread,1)
+ call_pal op_mtpr_prbr
+ RET
+ END(set_current_thread)
+
+
+/*
+ * Object:
+ * swpctxt EXPORTED function
+ *
+ * Change HW process context
+ *
+ * Arguments:
+ * pcb PHYSICAL struct pcb_hw *
+ * old_pcb VIRTUAL struct pcb_hw *
+ *
+ * Execute the PAL call. If old_pcb is non-zero it saves
+ * the current KSP in it.
+ */
+LEAF(swpctxt,2)
+ beq a1,1f
+ stq sp,0(a1)
+1: call_pal op_swpctxt
+ RET
+ END(swpctxt)
+
+/*
+ * Object:
+ * tbis EXPORTED function
+ *
+ * Invalidate TLB entry
+ *
+ * Arguments:
+ * addr unsigned long
+ *
+ */
+LEAF(tbis,1)
+ call_pal op_mtpr_tbis
+ RET
+ END(tbis)
+
+/*
+ * Object:
+ * tbiap EXPORTED function
+ *
+ * Invalidate Process-owned TLB entries
+ *
+ * Arguments:
+ * none
+ *
+ */
+LEAF(tbiap,0)
+ call_pal op_mtpr_tbiap
+ RET
+ END(tbiap)
+
+/*
+ * Object:
+ * tbia EXPORTED function
+ *
+ * Invalidate all TLB entries
+ *
+ * Arguments:
+ * none
+ *
+ */
+LEAF(tbia,0)
+ call_pal op_mtpr_tbia
+ RET
+ END(tbia)
+
+
+/*
+ * Object:
+ * rpcc EXPORTED function
+ *
+ * Read process cycle counter
+ *
+ * Arguments:
+ * none
+ *
+ */
+LEAF(rpcc,0)
+ rpcc v0
+ RET
+ END(rpcc)
+
+/*
+ * Object:
+ * delay EXPORTED function
+ * machine_cycles_per_usec EXPORTED unsigned long
+ *
+ * Busy loop for a given number of microseconds
+ *
+ * Arguments:
+ * usecs unsigned long
+ *
+ * I thought of using the internal Cycle Counter, but it
+ * is only 32 bits and the masking and overflow issues
+ * would take more than necessary. Besides, that way
+ * you still do not get an absolute-time delay: you get
+ * screwed across ctxt switches (yes, the kernel is not
+ * pre-emptible right now, but) for instance.
+ * So let's do it as usual.
+ */
+ .data
+ .globl machine_cycles_per_usec
+machine_cycles_per_usec: .quad 0 /* needs init */
+
+ .text
+ .align 5
+ .set noat
+LEAF(delay,1)
+
+#define FIXED_OVERHEAD 0x6c /* measured on ADU @150 Mhz */
+#define LOOP_OVERHEAD 3 /* measured on ADU @150 Mhz */
+
+ lda v0,machine_cycles_per_usec
+ ldq v0,0(v0)
+ mulq a0,v0,a0 /* usecs->cycles */
+ subq a0,FIXED_OVERHEAD,a0
+1: subq a0,LOOP_OVERHEAD,a0
+ bgt a0,1b
+ RET
+
+ END(delay)
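
So the conversion is usecs times machine_cycles_per_usec, minus a fixed call
overhead, burned off at LOOP_OVERHEAD cycles per iteration; both constants were
measured on one machine (an ADU at 150 MHz) and are assumptions elsewhere. The
same calculation in C (a sketch):

    extern unsigned long machine_cycles_per_usec; /* set at boot from the HWRPB */
    void delay_sketch(unsigned long usecs)
    {
        long cycles = (long)(usecs * machine_cycles_per_usec) - 0x6c;
        while (cycles > 0)
            cycles -= 3;        /* matches the subq/bgt loop above */
    }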
+
+ .align 5
+LEAF(delay_overh,1)
+ rpcc t0
+
+ lda v0,machine_cycles_per_usec
+ ldq v0,0(v0)
+ mulq a0,v0,a0 /* usecs->cycles */
+ subq a0,FIXED_OVERHEAD,a0
+1: subq a0,LOOP_OVERHEAD,a0
+ bgt a0,1b
+
+ rpcc v0
+ rpcc a1
+ subq v0,t0,t0
+ subq a1,v0,v0
+ subq t0,v0,v0
+ RET
+ END(delay_overh)
+
diff --git a/alpha/alpha/alpha_cpu.h b/alpha/alpha/alpha_cpu.h
new file mode 100644
index 00000000..6b714acb
--- /dev/null
+++ b/alpha/alpha/alpha_cpu.h
@@ -0,0 +1,208 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: alpha_cpu.h,v $
+ * Revision 2.2 93/02/05 07:57:12 danner
+ * Added machine-check error register defines.
+ * [93/02/04 00:42:24 af]
+ *
+ * Added reference to documentation source(s).
+ * [92/12/16 15:10:40 af]
+ *
+ * Created.
+ * [92/06/02 af]
+ *
+ */
+/*
+ * File: alpha_cpu.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/92
+ *
+ * Definitions for the ALPHA Architecture.
+ *
+ * This code was derived exclusively from information available in
+ * "Alpha Architecture Reference Manual", Richard L. Sites ed.
+ * Digital Press, Burlington, MA 01803
+ * ISBN 1-55558-098-X, Order no. EY-L520E-DP
+ *
+ * "DECChip 21064-AA RISC Microprocessor Preliminary Data Sheet"
+ * Apr 92, Digital Equipment Corporation, Maynard MA
+ * Available for anon ftp on the host gatekeeper.dec.com
+ */
+
+#ifndef _ALPHA_ALPHA_CPU_H_
+#define _ALPHA_ALPHA_CPU_H_ 1
+
+/*
+ * The Alpha virtual address space is logically a flat 64 bit one.
+ * Implementations of the architecture so far provide a small help
+ * to the OS called "Superpage mapping", which is a predefined
+ * virtual-->physical translation (when in kernel) mode that helps
+ * avoid TLB misses in critical code sections.
+ *
+ * We assume this feature WILL be provided in all implementations.
+ */
+
+/*
+ * The ITB maps va<33:13> --> pa<33:13> when va<42:41> == 2
+ * Plus, THERE IS CHECKED SIGN EXTENSION
+ */
+#define SUPERPAGE_I_MASK 0x0000060000000000
+#define SUPERPAGE_I_PAGE 0x0000040000000000
+
+#define SUPERPAGE_I_START 0xfffffc0000000000
+#define SUPERPAGE_I_SIZE 0x0000020000000000
+
+
+/*
+ * The DTB does the same, plus there is a second mapping
+ * to pa<33:30>==0 when va<42:30>==1ffe
+ */
+#define SUPERPAGE_D_MASK SUPERPAGE_I_MASK
+#define SUPERPAGE_D_PAGE SUPERPAGE_I_PAGE
+#define SUPERPAGE_D_START SUPERPAGE_I_START
+#define SUPERPAGE_D_SIZE SUPERPAGE_I_SIZE
+
+#define SUPERPAGE_IO_MASK 0x000007ff80000000
+#define SUPERPAGE_IO_PAGE SUPERPAGE_IO_MASK
+#define SUPERPAGE_IO_START 0xffffffff80000000
+#define SUPERPAGE_IO_SIZE 0x0000000080000000
+
+
+/*
+ * We divide the address space into five segments:
+ *
+ * kuseg: user virtual space
+ * k0seg: kernel space, directly mapped, cached
+ * k2seg: kernel virtual space
+ * k1seg: kernel space, directly mapped, uncached
+ * k3seg: user virtual space
+ * k4seg: unused (kernel virtual space, uncached)
+ *
+ * Current implementations of the chip do not support k3seg.
+ * They actually do sign-extension on addresses, killing it.
+ *
+ */
+#define KUSEG_BASE 0 /* from ..0000.00. */
+#define KUSEG_SIZE K2SEG_BASE /* to ..03fe.00. */
+#define KUSEG_END KUSEG_SIZE
+#define K2SEG_BASE (K3SEG_BASE-K2SEG_SIZE) /* from ..03fe.00. */
+#define K2SEG_SIZE 0x0000000200000000 /* to ..0400.00. */
+#define K3SEG_BASE SUPERPAGE_I_PAGE /* from ..0400.00. */
+#define K3SEG_SIZE (K0SEG_BASE-K3SEG_BASE) /* to ..fc00.00. */
+#define K0SEG_BASE SUPERPAGE_I_START /* from ..fc00.00. */
+#define K0SEG_SIZE SUPERPAGE_I_SIZE /* to ..fe00.00. */
+#define K4SEG_BASE 0xfffffe0000000000 /* from ..fe00.00. */
+#define K4SEG_SIZE 0x000001ff80000000 /* to ..ffff.80. */
+#define K1SEG_BASE SUPERPAGE_IO_START /* from ..ffff.80. */
+#define K1SEG_SIZE SUPERPAGE_IO_SIZE /* to ..ffff.ff. */
+
+/*
+ * Predicates.
+ * Note: revise this if k3seg
+ */
+#define ISA_KUSEG(x) ((vm_offset_t)(x) < K2SEG_BASE)
+#define ISA_K0SEG(x) (((vm_offset_t)(x) >= K0SEG_BASE) && \
+ ((vm_offset_t)(x) < K4SEG_BASE))
+#define ISA_K1SEG(x) ((vm_offset_t)(x) >= K1SEG_BASE)
+#define ISA_K2SEG(x) (((vm_offset_t)(x) >= K2SEG_BASE) && \
+ ((vm_offset_t)(x) < K3SEG_BASE))
+
+/*
+ * Kernel segments 0 and 1 are directly mapped to
+ * physical memory, starting at physical address 0.
+ *
+ */
+#define K0SEG_TO_PHYS(x) ((vm_offset_t)(x) & 0x00000003ffffffff)
+#define PHYS_TO_K0SEG(x) ((vm_offset_t)(x) | K0SEG_BASE)
+
+#define K1SEG_TO_PHYS(x) K0SEG_TO_PHYS(x)
+#define PHYS_TO_K1SEG(x) ((vm_offset_t)(x) | K1SEG_BASE)
+
+#define K0SEG_TO_K1SEG(x) ((vm_offset_t)(x) | SUPERPAGE_IO_PAGE)
+#define K1SEG_TO_K0SEG(x) PHYS_TO_K0SEG(K1SEG_TO_PHYS(x))
+
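A worked example of the direct mapping, using the masks above: translating
between K0SEG and physical addresses is pure bit arithmetic (the function name
is illustrative):

    #include <assert.h>
    void k0seg_example(void)
    {
        unsigned long va = 0xfffffc0000001000UL;       /* a K0SEG address */
        unsigned long pa = va & 0x00000003ffffffffUL;  /* K0SEG_TO_PHYS   */
        assert(pa == 0x1000);
        assert((pa | 0xfffffc0000000000UL) == va);     /* PHYS_TO_K0SEG   */
    }
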
+/*
+ * Architecturally defined registers
+ */
+
+/*
+ * Program status word
+ */
+#define PS_STACK_ALIGNMENT 0x3f00000000000000 /* in saved PS */
+#define PS_zero 0xc0ffffffffffe060
+#define PS_IPL_MASK 0x0000000000001f00
+#define PS_IPL_SHIFT 8
+#define PS_VIRTUAL_MACHINE 0x0000000000000080
+#define PS_CURRENT_MODE 0x0000000000000018
+#define PS_MODE_SHIFT 3
+#define PS_SOFTWARE 0x0000000000000007
+
+#define ALPHA_IPL_0 0
+#define ALPHA_IPL_SOFTC 8
+#define ALPHA_IPL_IO 21
+#define ALPHA_IPL_CLOCK 22
+#define ALPHA_IPL_HIGH 23
+
+#define PS_KERNEL_MODE 0
+#define PS_EXECUTIVE_MODE 1
+#define PS_SUPERVISOR_MODE 2
+#define PS_USER_MODE 3
+
+#define alpha_user_mode(ps) \
+ ((((ps) & PS_CURRENT_MODE) >> PS_MODE_SHIFT) == PS_USER_MODE)
+
+#define alpha_initial_ps_value \
+ (PS_USER_MODE << PS_MODE_SHIFT)
+
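For instance, decoding a saved PS with the masks above extracts the processor
mode from bits <4:3> and the interrupt priority level from bits <12:8> (helper
names are illustrative):

    unsigned ps_mode(unsigned long ps)
    {
        return (ps & PS_CURRENT_MODE) >> PS_MODE_SHIFT;  /* 0..3 */
    }
    unsigned ps_ipl(unsigned long ps)
    {
        return (ps & PS_IPL_MASK) >> PS_IPL_SHIFT;       /* 0..23 in use */
    }
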
+/*
+ * Floating point control register
+ */
+
+#define FPCR_SUM 0x8000000000000000
+#define FPCR_raz 0x700fffffffffffff
+#define FPCR_DYN_RM 0x0c00000000000000
+#define FPCR_IOV 0x0200000000000000
+#define FPCR_INE 0x0100000000000000
+#define FPCR_INF 0x0080000000000000
+#define FPCR_OVF 0x0040000000000000
+#define FPCR_DZE 0x0020000000000000
+#define FPCR_INV 0x0010000000000000
+
+
+/*
+ * Machine check error register
+ */
+
+#define MCES_MCK 0x1
+#define MCES_SCE 0x2
+#define MCES_PCE 0x4
+#define MCES_DPC 0x8
+#define MCES_DSC 0x10
+
+
+#endif /* _ALPHA_ALPHA_CPU_H_ */
diff --git a/alpha/alpha/alpha_init.c b/alpha/alpha/alpha_init.c
new file mode 100644
index 00000000..0a773af1
--- /dev/null
+++ b/alpha/alpha/alpha_init.c
@@ -0,0 +1,316 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: alpha_init.c,v $
+ * Revision 2.5 93/05/15 19:10:51 mrt
+ * machparam.h -> machspl.h
+ *
+ * Revision 2.4 93/03/11 13:57:46 danner
+ * Corrected boot time page stealing commentary.
+ * [93/03/11 danner]
+ *
+ * Revision 2.3 93/03/09 10:49:03 danner
+ * GCC quiets, protos, standard boothowto, lint.
+ * [93/03/05 af]
+ *
+ * Revision 2.2 93/02/05 07:57:03 danner
+ * No more ISP hacks. Now we parse args.
+ * Fixed bug in memory stealing call.
+ * [93/02/04 00:56:23 af]
+ *
+ * MP Icache sanity call, help Jeffrey in dprintf by delaying
+ * dropping of bootstrap VM spaces.
+ * [93/01/15 af]
+ * Set vm_page_big_pagenum to support vm_page_grab_contiguous_pages.
+ * [92/12/25 01:42:54 af]
+ *
+ * Added reference to doc for the HWRPB &co.
+ * [92/12/22 af]
+ * Added reference to documentation source(s).
+ * [92/12/16 15:11:03 af]
+ *
+ * Created.
+ * [92/06/03 af]
+ *
+ */
+/*
+ * File: alpha_init.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/92
+ *
+ * Basic initialization for Alpha
+ *
+ * This code was derived exclusively from information available in
+ * "Alpha Architecture Reference Manual", Richard L. Sites ed.
+ * Digital Press, Burlington, MA 01803
+ * ISBN 1-55558-098-X, Order no. EY-L520E-DP
+ *
+ * "VMS for Alpha Platforms Internals and Data Structures"
+ * Digital Press 1992, Burlington, MA 01803
+ * Order number EY-L466E-P1/2, ISBN 1-55558-095-5
+ * [Especially volume 1, chapter 33 "Bootstrap processing"]
+ */
+
+#include <mach_kdb.h>
+
+#include <machine/machspl.h> /* spl definitions */
+#include <kern/thread.h>
+#include <alpha/alpha_cpu.h>
+#include <sys/reboot.h>
+#include <mach/vm_param.h>
+#include <alpha/prom_interface.h>
+#include <alpha/thread.h>
+
+/*
+ * Memory
+ */
+vm_offset_t memory_start, avail_start, avail_end;
+vm_offset_t virtual_avail, virtual_end;
+vm_size_t mem_size;
+vm_size_t memlimit = 0; /* patchable to run in less memory */
+
+vm_size_t (*alphabox_steal_memory)() = 0;
+
+extern char end[], edata[];
+char *esym = (char *)0;
+
+extern struct pcb boot_pcb;
+
+#if MACH_KDB
+int boothowto = RB_KDB;
+#else /* MACH_KDB */
+int boothowto = 0;
+#endif /* MACH_KDB */
+
+static init_memory( vm_offset_t first_page); /* forward */
+
+alpha_init( boolean_t gcc_compiled )
+{
+ vm_offset_t first_page; /* first free physical page */
+ int i;
+ struct thread fake_th; /* while sizing memory, and for KDB */
+ struct task fake_tk;
+ extern int pmap_max_asn;
+
+ /*
+ * Only the master cpu can get here.
+ */
+
+ (void) splhigh(); /* sanity precautions */
+ tbia();
+ alphacache_Iflush();
+
+ init_prom_interface();
+
+ /*
+ * Move symbol table and bootstrap code
+ * out of BSS.
+ */
+ pmap_max_asn = alpha_hwrpb->maximum_asn;
+ PAGE_SIZE = alpha_hwrpb->page_size;
+ vm_set_page_size();
+
+ first_page = move_bootstrap();
+
+ /*
+ * Tune the delay() function
+ */
+ {
+ extern natural_t machine_cycles_per_usec;
+
+ machine_cycles_per_usec =
+ alpha_hwrpb->cycle_counter_resolution / 1000000;
+ }
+
+#if MACH_KDB
+ kdb_init(gcc_compiled);
+#endif
+
+ bzero(edata, end - edata); /* zero bss */
+
+ /*
+ * Fake some variables so that we can take traps right away.
+ * This is important while sizing memory, and in case KDB
+ * is invoked before the first thread is set up.
+ *
+ * We will also be allocating VM before switching to the
+ * first thread, so we need to setup our HW process context
+ * immediately.
+ */
+ bzero(&fake_th, sizeof fake_th);
+ bzero(&fake_tk, sizeof fake_tk);
+ fake_th.task = &fake_tk;
+ fake_th.pcb = &boot_pcb;
+ boot_pcb.mss.framep = &boot_pcb.mss.saved_frame;
+
+ {
+ extern pt_entry_t root_kpdes[];
+ boot_pcb.mss.hw_pcb.ptbr =
+ alpha_btop(K0SEG_TO_PHYS(root_kpdes));
+ }
+ set_current_thread(&fake_th);
+ master_cpu = cpu_number();
+ active_threads[master_cpu] = &fake_th;
+ swpctxt(K0SEG_TO_PHYS(&boot_pcb), &boot_pcb.mss.hw_pcb.ksp);
+
+ /*
+ * Stop the clock for now.
+ */
+ stopclocks();
+
+ set_root_name();
+
+ /*
+ * First available page of phys mem
+ */
+ first_page = alpha_btop(K0SEG_TO_PHYS(alpha_round_page(first_page)));
+
+ /*
+ * Parse command line, setting appropriate switches and
+ * configuration values.
+ * NOTE: Before cnprobe() is called you must do dprintf;
+ * printf won't work.
+ */
+
+ /* First set some defaults */
+
+ parse_args();
+
+ init_memory(first_page);
+
+ /*
+ * Initialize the machine-dependent portion of the VM system
+ */
+ pmap_bootstrap();
+
+ alpha_box_model_dep(); /* cold adaptation */
+
+ /* if we need big contiguous chunks then we do */
+ if (alphabox_steal_memory) {
+ /*
+ * Boot time page stealing. Good reasons so far:
+ * - console data structures (shared screen info)
+ */
+ vm_size_t needed;
+
+ needed = (*alphabox_steal_memory)(PHYS_TO_K0SEG(avail_start));
+ avail_start += round_page(needed);
+ }
+
+ cons_find(1); /* initialize console device: MUST be there */
+#if 0
+	pmap_rid_of_console();	/* move back here once debugged */
+#endif
+
+#if MACH_KDB
+ if ((boothowto&RB_HALT) && (boothowto&RB_KDB))
+ gimmeabreak();
+#endif /* MACH_KDB */
+
+ printf("Alpha boot: memory from 0x%x to 0x%x\n",
+ memory_start, avail_end);
+ printf("Kernel virtual space from %#X to %#X.\n",
+ virtual_avail, virtual_end);
+ printf("Available physical space from %#X to %#X\n",
+ avail_start, avail_end);
+
+#if 1
+ pmap_rid_of_console();
+#endif
+
+ machine_startup();
+
+}
+
+#if (NCPUS > 1)
+
+alpha_slave_init()
+{
+ struct thread fake_th; /* while sizing memory, and for KDB */
+ struct task fake_tk;
+
+ splhigh();
+
+ bzero(&fake_th, sizeof fake_th);
+ bzero(&fake_tk, sizeof fake_tk);
+ fake_th.task = &fake_tk;
+ fake_th.pcb = &boot_pcb;
+ boot_pcb.mss.framep = &boot_pcb.mss.saved_frame;
+ active_threads[cpu_number()] = &fake_th;
+ set_current_thread(&fake_th);
+ swpctxt(K0SEG_TO_PHYS(&boot_pcb), &boot_pcb.mss.hw_pcb.ksp);
+
+ tbia();
+ alphacache_Iflush();
+
+ slave_main();
+}
+#endif /* NCPUS > 1 */
+
+static
+init_memory( vm_offset_t first_page)
+{
+ register vm_size_t i, j, max;
+ extern vm_size_t vm_page_big_pagenum;
+ struct memory_data_descriptor_table *mddt;
+
+ /*
+ * See how much memory we have
+ */
+ mddt = (struct memory_data_descriptor_table *)
+ ((char*)alpha_hwrpb + alpha_hwrpb->memory_data_descriptor_table_offset);
+
+ max = memlimit ? alpha_btop(memlimit) : alpha_btop(K0SEG_SIZE);
+ i = first_page;
+ for (j = 0; j < mddt->num_clusters; j++) {
+ vm_size_t first, last;
+
+ first = mddt->mem_clusters[j].first_pfn;
+ last = first + mddt->mem_clusters[j].num_pfn;
+
+ if (i >= first && i < last) {
+ memory_start = alpha_ptob(first);
+ i = last;
+ break;
+ }
+ }
+
+	/*
+	 * If we were asked to run in less memory, honor the limit.
+	 */
+ if (memlimit && (i > max))
+ i = max;
+ vm_page_big_pagenum = i;
+ mem_size = alpha_ptob(i);
+
+ /*
+ * Notify the VM system of what memory looks like
+ */
+ avail_start = (vm_offset_t)alpha_ptob(first_page);
+ avail_end = (vm_offset_t)mem_size;
+}
+
diff --git a/alpha/alpha/alpha_instruction.c b/alpha/alpha/alpha_instruction.c
new file mode 100644
index 00000000..ec491b5a
--- /dev/null
+++ b/alpha/alpha/alpha_instruction.c
@@ -0,0 +1,408 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: alpha_instruction.c,v $
+ * Revision 2.2 93/01/14 17:11:16 danner
+ * Added reference to documentation source(s).
+ * [92/12/16 15:11:14 af]
+ *
+ * Created.
+ * [92/06/01 af]
+ *
+ */
+/*
+ * File: alpha_instruction.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/92
+ *
+ * Functions that operate on ALPHA instructions,
+ * such as branch prediction and opcode predicates.
+ *
+ * This code was derived exclusively from information available in
+ * "Alpha Architecture Reference Manual", Richard L. Sites ed.
+ * Digital Press, Burlington, MA 01803
+ * ISBN 1-55558-098-X, Order no. EY-L520E-DP
+ */
+
+#include <mach_kdb.h>
+
+#include <mach/alpha/alpha_instruction.h>
+#include <mach/exception.h>
+#include <alpha/alpha_cpu.h>
+#include <mach/mach_types.h>
+#include <kern/task.h>
+#include <alpha/thread.h>
+
+/*
+ * Object:
+ * isa_call EXPORTED function
+ *
+ * Function call predicate
+ *
+ */
+boolean_t
+isa_call(ins)
+ register alpha_instruction ins;
+{
+ return ((ins.branch_format.opcode == op_bsr) ||
+ ((ins.jump_format.opcode == op_j) &&
+ (ins.jump_format.action & 1))); /* covers jsr & jcr */
+}
+
+
+/*
+ * Object:
+ * isa_ret EXPORTED function
+ *
+ * Function return predicate
+ *
+ */
+boolean_t
+isa_ret(ins)
+ register alpha_instruction ins;
+{
+ return ((ins.jump_format.opcode == op_j) &&
+ (ins.jump_format.action == op_ret));
+}
+
+
+/*
+ * Object:
+ * isa_rei EXPORTED function
+ *
+ * Return from interrupt predicate
+ *
+ */
+boolean_t
+isa_rei(ins)
+ register alpha_instruction ins;
+{
+ return ((ins.pal_format.opcode == op_pal) &&
+ (ins.pal_format.function == op_rei));
+}
+
+/*
+ * Object:
+ * isa_branch EXPORTED function
+ *
+ * Branch predicate
+ *
+ * Does NOT include function calls, use isa_call() for that.
+ * Includes all other jump and branch instructions (ret included)
+ */
+boolean_t
+isa_branch(ins)
+ register alpha_instruction ins;
+{
+ switch (ins.branch_format.opcode) {
+
+ case op_j:
+ case op_br:
+ case op_fbeq:
+ case op_fblt:
+ case op_fble:
+ case op_fbne:
+ case op_fbge:
+ case op_fbgt:
+ case op_blbc:
+ case op_beq:
+ case op_blt:
+ case op_ble:
+ case op_blbs:
+ case op_bne:
+ case op_bge:
+ case op_bgt:
+ return TRUE;
+
+ default:
+ return FALSE;
+ }
+}
+
+/*
+ * Object:
+ *	inst_unconditional_flow_transfer	EXPORTED function
+ *
+ * return true for instructions that result in
+ * unconditional transfers of the flow of control.
+ *
+ */
+boolean_t
+inst_unconditional_flow_transfer(ins)
+ register alpha_instruction ins;
+{
+ switch (ins.branch_format.opcode) {
+
+ case op_j:
+ case op_br:
+ return TRUE;
+
+ case op_pal:
+ return ((ins.pal_format.function == op_rei) ||
+ (ins.pal_format.function == op_chmk));
+
+ }
+
+ return FALSE;
+
+}
+
+/*
+ * Object:
+ * isa_spill EXPORTED function
+ *
+ * Register save (spill) predicate
+ *
+ */
+boolean_t
+isa_spill(ins,regn)
+ register alpha_instruction ins;
+ register unsigned regn;
+{
+ return ((ins.mem_format.opcode == op_stq) &&
+ (ins.mem_format.rd == regn));
+}
+
+/*
+ * Object:
+ * isa_load EXPORTED function
+ *
+ * Memory load predicate.
+ *
+ */
+boolean_t
+isa_load(ins)
+ register alpha_instruction ins;
+{
+ return
+ /* loads */
+ (ins.mem_format.opcode == op_ldq_u) ||
+ ((op_ldf <= ins.mem_format.opcode) &&
+ (ins.mem_format.opcode <= op_ldt)) ||
+ ((op_ldl <= ins.mem_format.opcode) &&
+ (ins.mem_format.opcode <= op_ldq_l)) ||
+ /* prefetches */
+ ((ins.mem_format.opcode == op_special) &&
+ ((ins.mem_format.displacement == (short)op_fetch) ||
+ (ins.mem_format.displacement == (short)op_fetch_m))) ;
+ /* note: MB is treated as a store */
+}
+
+/*
+ * Object:
+ * isa_store EXPORTED function
+ *
+ * Memory store predicate.
+ *
+ */
+boolean_t
+isa_store(ins)
+ register alpha_instruction ins;
+{
+ return
+		/* stores */
+ (ins.mem_format.opcode == op_stq_u) ||
+ ((op_stf <= ins.mem_format.opcode) &&
+ (ins.mem_format.opcode <= op_stt)) ||
+ ((op_stl <= ins.mem_format.opcode) &&
+ (ins.mem_format.opcode <= op_stq_c)) ||
+ /* barriers */
+ ((ins.mem_format.opcode == op_special) &&
+ (ins.mem_format.displacement == op_mb));
+}
+
+/*
+ * Object:
+ * isa_load_store EXPORTED function
+ *
+ * Memory load/store predicate
+ *
+ * If the instruction is a load or store instruction
+ * returns the destination address.
+ */
+boolean_t
+isa_load_store(ins, dest_addr, getreg, arg)
+ register alpha_instruction ins;
+ vm_offset_t *dest_addr;
+ vm_offset_t (*getreg)();
+ vm_offset_t arg;
+{
+ if ((ins.mem_format.opcode == op_ldq_u) ||
+ (ins.mem_format.opcode == op_stq_u) ||
+ ((ins.mem_format.opcode >= op_ldf) &&
+ (ins.mem_format.opcode <= op_stq_c))) {
+
+ /*
+ * The only address calculation is register+displacement
+ */
+ *dest_addr = (vm_offset_t) (ins.mem_format.displacement +
+ (*getreg) (ins.mem_format.rs, arg));
+
+ return TRUE;
+ }
+ return FALSE;
+}
+
+/*
+ * Object:
+ * branch_taken EXPORTED function
+ *
+ * Branch prediction
+ *
+ * Returns the address where the instruction might branch,
+ * if the branch is taken.
+ * Needs the address where the instruction is located and
+ * a function returning the current value of some register.
+ *
+ * The instruction must be a call or branch, or we panic.
+ */
+vm_offset_t
+branch_taken(ins, addr, getreg, arg)
+ register alpha_instruction ins;
+ vm_offset_t addr;
+ vm_offset_t (*getreg)();
+ vm_offset_t arg;
+{
+ switch (ins.branch_format.opcode) {
+
+ case op_j:
+ return (*getreg) (ins.jump_format.rs, arg) & ~3;
+
+ case op_br:
+ case op_fbeq:
+ case op_fblt:
+ case op_fble:
+ case op_bsr:
+ case op_fbne:
+ case op_fbge:
+ case op_fbgt:
+ case op_blbc:
+ case op_beq:
+ case op_blt:
+ case op_ble:
+ case op_blbs:
+ case op_bne:
+ case op_bge:
+ case op_bgt:
+ return ((ins.branch_format.displacement << 2) + (addr + 4));
+
+ }
+
+ panic("branch_taken");
+}
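
The displacement field is a signed instruction count relative to the updated
PC, so the target is (displacement << 2) + (addr + 4). Worked example (a
sketch): a branch at 0x1000 with a displacement of -2 targets 0x0ffc.

    unsigned long branch_target(long disp, unsigned long addr)
    {
        return (unsigned long)((disp << 2) + (long)(addr + 4));
    }
    /* branch_target(-2, 0x1000) == 0x0ffc */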
+
+
+#if MACH_KDB
+/*
+ * Object:
+ * stack_modified EXPORTED function
+ *
+ * Does the instruction affect the stack pointer and how
+ *
+ * Returns the amount by which the instruction changes the
+ * stack pointer, or 0.
+ * Needs a function returning the current value of some
+ * register, but this needs not be precise as the C compiler
+ * typically uses immediate values.
+ */
+int stack_modified(ins, getreg, arg)
+ register alpha_instruction ins;
+ vm_offset_t (*getreg)();
+ vm_offset_t arg;
+{
+#define SP 30
+	/* the frame is modified only by lda, else you lose */
+ if ((ins.mem_format.opcode == op_lda) &&
+ (ins.mem_format.rd == SP)) {
+
+ if (ins.mem_format.rs == SP)
+ return ins.mem_format.displacement;
+ return ins.mem_format.displacement +
+ (*getreg)(ins.mem_format.rs, arg) -
+ (*getreg)(SP, arg);
+ }
+ return 0;
+}
+
+#define ALPHA_REG_SAVE_SEARCH ((32+3)*4) /* ???? what is this ???? */
+
+/*
+ * Object:
+ * db_restore_regs EXPORTED function
+ *
+ * restore register environment
+ *
+ * This code assumes that all register saves are made at the prolog
+ * code, and only "lda[h]" and "stq xx,yy(sp)" appear in
+ * in it.
+ *
+ * Which is of course pure idiocy, unless you ever only saw GCC.
+ * Anyways, OSF gaveth and the code that uses it can tolerate it.
+ */
+void
+db_restore_regs(ssp, sp, proc_pc, cur_pc, task)
+ struct alpha_saved_state *ssp;
+ vm_offset_t sp;
+ vm_offset_t proc_pc;
+ vm_offset_t cur_pc;
+ task_t task;
+{
+ register vm_offset_t pc, epc;
+ alpha_instruction ins;
+ extern vm_size_t *addrof_alpha_reg();
+
+ epc = proc_pc + ALPHA_REG_SAVE_SEARCH;
+ if (epc > cur_pc)
+ epc = cur_pc;
+ for (pc = proc_pc; pc < epc; pc += sizeof(alpha_instruction)) {
+
+ ins.bits = db_get_task_value(pc, sizeof(alpha_instruction), FALSE, task);
+
+ if (ins.mem_format.opcode == op_lda
+ || ins.mem_format.opcode == op_ldah)
+ continue;
+
+ if ((ins.mem_format.rs == SP) &&
+ (ins.mem_format.opcode == op_stq)) {
+ if (ins.mem_format.rd != 31) {
+ vm_size_t *p = addrof_alpha_reg(ins.mem_format.rd,ssp);
+
+ *p = db_get_task_value(sp + ins.mem_format.displacement,
+ 8, FALSE, task);
+ }
+ continue;
+ }
+ end_prolog:
+ /*
+		 * end of prolog code; look at one more instruction.
+ */
+ if (epc > pc + 4)
+ epc = pc + 8;
+ }
+}
+
+#endif /* MACH_KDB */
diff --git a/alpha/alpha/alpha_lock.S b/alpha/alpha/alpha_lock.S
new file mode 100644
index 00000000..319caec6
--- /dev/null
+++ b/alpha/alpha/alpha_lock.S
@@ -0,0 +1,310 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: alpha_lock.s,v $
+ * Revision 2.4 93/03/09 10:49:07 danner
+ * Added placeholder for non-MP case to satisfy GLD.
+ * [93/02/16 af]
+ *
+ * Revision 2.3 93/01/19 08:59:08 danner
+ * Locks are quad-words now.
+ * [92/12/30 af]
+ *
+ * Revision 2.2 93/01/14 17:11:20 danner
+ * Try_lock should try harder, until either wins it or lose it.
+ * [92/12/24 af]
+ * Added reference to documentation source(s).
+ * [92/12/16 15:11:28 af]
+ *
+ * Created.
+ * [92/06/02 af]
+ *
+ */
+/*
+ * File: alpha_lock.s
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 11/92
+ *
+ * Simple inter-locked operations
+ *
+ * This code was derived exclusively from information available in
+ * "Alpha Architecture Reference Manual", Richard L. Sites ed.
+ * Digital Press, Burlington, MA 01803
+ * ISBN 1-55558-098-X, Order no. EY-L520E-DP
+ */
+
+#include <cpus.h>
+#include <platforms.h>
+
+#if (NCPUS>1)
+
+#include <mach/alpha/asm.h>
+
+ .set noreorder
+ .set noat
+
+/*
+ * Object:
+ * simple_lock_init EXPORTED function
+ *
+ * Initialize a simple lock
+ *
+ * Arguments:
+ * lock struct slock *
+ */
+LEAF(simple_lock_init,1)
+ stq zero,0(a0) /* its a long */
+ mb
+ RET
+ END(simple_lock_init)
+
+#if ADU
+/* We don't know what this is yet. Possibly because of bus overload,
+   the ADU sometimes misses cache invalidates. A fix
+   seems to be to flush the dcache in between attempts to get
+   a lock. That is done in C code. */
+
+#define simple_lock Simple_lock
+
+#endif /* ADU */
+
+/*
+ * Object:
+ * simple_lock EXPORTED function
+ *
+ * Acquire a simple lock
+ *
+ * Arguments:
+ * lock struct slock *
+ */
+LEAF(simple_lock,1)
+or s0,zero,t1
+or s1,zero,t2
+ mb
+or t1,zero,s0
+or t2,zero,s1
+ ldq_l t0,0(a0) /* fetch&lock */
+ or zero,2,v0 /* build "locked" value */
+ bne t0,simple_lock_loop /* was it taken already */
+ stq_c v0,0(a0) /* race to grab it */
+ beq v0,simple_lock_loop
+#if 1 /* debug */
+ mb
+#endif
+ RET /* got it alright */
+simple_lock_loop:
+#if 1 /* debug */
+ ldah a1,0x10(zero) /* 1mil */
+ lda a2,0x10(zero) /* in-between flushes */
+simple_lock_loop_:
+#endif
+ lda a1,-1(a1)
+ lda a2,-1(a2)
+or s0,zero,t1
+or s1,zero,t2
+ ldq t0,0(a0) /* check again */
+ nop /* do not double-issue */
+or t1,zero,s0
+or t2,zero,s1
+bgt a1,1f
+call_pal 0x80/*op_bpt*/
+ldah a1,10(zero)
+1:
+bgt a2,1f
+call_pal 0x86/*op_imb*/
+lda a2,0x10(zero)
+1:
+ bne t0,simple_lock_loop_ /* probably still held */
+ nop
+ br zero,simple_lock /* go and try again now */
+ END(simple_lock)
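
The ldq_l/stq_c pair is Alpha's load-locked/store-conditional: the store
succeeds only if nothing disturbed the reservation since the load. The acquire
loop above, restated with C11 atomics as an analogue (a sketch only; the kernel
uses the ISA primitives directly):

    #include <stdatomic.h>
    void simple_lock_sketch(_Atomic unsigned long *lock)
    {
        unsigned long expected;
        do {
            while (atomic_load(lock) != 0)   /* spin on plain loads,     */
                ;                            /* like the "check again"   */
            expected = 0;
        } while (!atomic_compare_exchange_weak(lock, &expected, 2UL));
        atomic_thread_fence(memory_order_acquire);   /* the trailing mb */
    }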
+
+/*
+ * Object:
+ * simple_unlock EXPORTED function
+ *
+ * Release a simple lock
+ *
+ * Arguments:
+ * lock struct slock *
+ */
+LEAF(simple_unlock,1)
+or s0,zero,t1
+or s1,zero,t2
+ mb /* make sure all writes completed */
+ stq zero,0(a0) /* its a long */
+or t1,zero,s0
+or t2,zero,s1
+ mb /* make damn sure they see it */
+ RET
+ END(simple_unlock)
+
+/*
+ * Object:
+ * simple_lock_try EXPORTED function
+ *
+ * Try once to acquire a simple lock
+ *
+ * Arguments:
+ * none
+ */
+LEAF(simple_lock_try,1)
+or s0,zero,t1
+or s1,zero,t2
+ mb
+or t1,zero,s0
+or t2,zero,s1
+ ldq_l t0,0(a0)
+ or zero,2,v0 /* build "locked" value */
+ bne t0,nope /* already set, forget it */
+ stq_c v0,0(a0) /* see if we still had the lock */
+ beq v0,yipe
+#if 1 /* debug */
+ mb
+#endif
+ RET /* if v0 != 0 then we got it */
+nope:
+ mov zero,v0 /* failed to acquire lock */
+ RET
+yipe: br zero,simple_lock_try /* try once more */
+ END(simple_lock_try)
+
+/*
+ * Object:
+ * i_bit_clear EXPORTED function
+ *
+ * Clear a bit in interlocked fashion
+ *
+ * Arguments:
+ * bitno unsigned int
+ * bitset unsigned long *
+ */
+LEAF(i_bit_clear,2)
+ or zero,1,t0
+ sll t0,a0,t0 /* mask up the bit */
+ ldq_l v0,0(a1) /* fetch&lock */
+ andnot v0,t0,v0 /* clear it */
+ stq_c v0,0(a1) /* put it back */
+ beq v0,i_bit_clear_again /* did the store succeed */
+ mb /* make sure they see it */
+ RET
+i_bit_clear_again:
+ br zero,i_bit_clear
+ END(i_bit_clear)
+
+/*
+ * Object:
+ * i_bit_set EXPORTED function
+ *
+ *	Set a bit in interlocked fashion
+ *
+ * Arguments:
+ * bitno unsigned int
+ * bitset unsigned long *
+ */
+LEAF(i_bit_set,2)
+ or zero,1,t0
+ sll t0,a0,t0 /* mask up the bit */
+ ldq_l v0,0(a1) /* fetch&lock */
+ or v0,t0,v0 /* set it */
+ stq_c v0,0(a1) /* put it back */
+ beq v0,i_bit_set_again /* did the store succeed */
+ mb /* make sure they see it */
+ RET
+i_bit_set_again:
+ br zero,i_bit_set
+ END(i_bit_set)
+
+/*
+ * Object:
+ * bit_lock EXPORTED function
+ *
+ * Acquire a bit lock
+ *
+ * Arguments:
+ * bitno unsigned int
+ * bitstring unsigned char *
+ *
+ *	Massage the arguments into the (bit-within-word, aligned-word)
+ *	form that i_bit_set expects, then branch to it
+ */
+LEAF(bit_lock,2)
+ and a0,0x7,t0 /* bit within byte */
+ srl a0,3,t1 /* byte no */
+ addq a1,t1,t1 /* pointer to byte */
+ and t1,0x7,t2 /* byte within word */
+ andnot t1,0x7,a1 /* aligned, arg ok now */
+ sll t2,3,t2
+ or t2,t0,a0 /* bit within word, arg ok */
+ br zero,i_bit_set
+
+ END(bit_lock)
+
+/*
+ * Object:
+ * bit_unlock EXPORTED function
+ *
+ * Release a bit lock
+ *
+ * Arguments:
+ * bitno unsigned int
+ * bitstring unsigned char *
+ */
+LEAF(bit_unlock,2)
+ and a0,0x7,t0 /* bit within byte */
+ srl a0,3,t1 /* byte no */
+ addq a1,t1,t1 /* pointer to byte */
+ and t1,0x7,t2 /* byte within word */
+ andnot t1,0x7,a1 /* aligned, arg ok now */
+ sll t2,3,t2
+ or t2,t0,a0 /* bit within word, arg ok */
+ br zero,i_bit_clear
+
+ END(bit_unlock)
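+
+/*
+ * In C, the argument shuffle done by bit_lock/bit_unlock above
+ * amounts to (a sketch only):
+ *
+ *	bit_in_byte = bitno & 7;
+ *	byte        = (long)bitstring + (bitno >> 3);
+ *	a1          = byte & ~7L;			aligned quadword
+ *	a0          = ((byte & 7) << 3) | bit_in_byte;	bit within it
+ */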
+
+/*
+ * Object:
+ * Some statically allocated synchronization variables
+ *
+ *	To reduce contention, some variables had better not land on
+ *	the same cache line.  Doing the allocation here, by hand,
+ *	ensures the compiler cannot place them badly.  Ugly, but effective.
+ */
+ .data
+ .align 5 /* align on 32 byte boundary */
+ .globl cpus_active
+cpus_active: .space 32
+ .globl cpus_idle
+cpus_idle: .space 32
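+
+/*
+ * In C, each of these would need a 32-byte-sized, 32-byte-aligned
+ * object, e.g. (a sketch assuming the GCC aligned attribute):
+ *
+ *	struct padded_word { volatile long w; char pad[24]; }
+ *		__attribute__((aligned(32)));
+ */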
+
+#else /* NCPUS>1 */
+
+ /* Linker does not like empty .o files */
+ .text
+ .globl uniprocessor_kernel
+uniprocessor_kernel: nop
+
+#endif /* NCPUS>1 */
diff --git a/alpha/alpha/alpha_mem_ops.c b/alpha/alpha/alpha_mem_ops.c
new file mode 100644
index 00000000..de314ceb
--- /dev/null
+++ b/alpha/alpha/alpha_mem_ops.c
@@ -0,0 +1,285 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: alpha_mem_ops.c,v $
+ * Revision 2.3 93/03/09 10:49:13 danner
+ * GCC lint.
+ * [93/03/05 af]
+ *
+ * Revision 2.2 93/02/05 07:57:24 danner
+ * Fixed broken blkclr. Added protos. Added rindex.
+ * [93/02/04 00:54:25 af]
+ *
+ * Created.
+ * [92/12/10 14:50:41 af]
+ *
+ */
+/*
+ * File: alpha_mem_ops.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/92
+ *
+ * Memory copy, clear and compare operations, including string
+ * operations and byte-swaps.
+ *
+ */
+
+#include <mach/mach_types.h>
+
+/*
+ * Object:
+ * bzero EXPORTED function
+ *
+ * Clear memory locations
+ *
+ * Optimize for aligned memory ops, if possible and simple.
+ * Might need later recoding in assembly for better efficiency,
+ * just like many other functions in here.
+ */
+
+void
+bzero(
+ register vm_offset_t addr,
+ register unsigned bcount)
+{
+ register int i;
+
+ if (bcount == 0) /* sanity */
+ return;
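+	/*
+	 * Align to a longword boundary first.  The cases below fall
+	 * through deliberately, writing 3, 2 or 1 leading byte(s)
+	 * according to (addr & 3).
+	 */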
+ switch (addr & 3) {
+ case 1:
+ *((char *) addr++) = 0;
+ if (--bcount == 0)
+ return;
+ case 2:
+ *((char *) addr++) = 0;
+ if (--bcount == 0)
+ return;
+ case 3:
+ *((char *) addr++) = 0;
+ if (--bcount == 0)
+ return;
+ default:
+ break;
+ }
+
+#define LOG_UNROLL 5
+#define PER_PASS (1 << LOG_UNROLL)
+ if (bcount >= PER_PASS) {
+ for (i = bcount >> LOG_UNROLL; i; i--, addr += PER_PASS) {
+ ((int *)addr)[ 0] = 0; ((int *)addr)[ 1] = 0;
+ ((int *)addr)[ 2] = 0; ((int *)addr)[ 3] = 0;
+ ((int *)addr)[ 4] = 0; ((int *)addr)[ 5] = 0;
+ ((int *)addr)[ 6] = 0; ((int *)addr)[ 7] = 0;
+ }
+ bcount &= (PER_PASS - 1); /* fast modulus */
+ }
+#undef PER_PASS
+#undef LOG_UNROLL
+
+ for (i = bcount >> 2; i; i--, addr += 4)
+ *((int *) addr) = 0;
+
+ switch (bcount & 3) {
+ case 3: *((char*)addr++) = 0;
+ case 2: *((char*)addr++) = 0;
+ case 1: *((char*)addr++) = 0;
+ default:break;
+ }
+}
+
+/*
+ * Object:
+ * blkclr EXPORTED function
+ *
+ * Same as above
+ *
+ */
+void
+blkclr(
+ register vm_offset_t addr,
+ register unsigned bcount)
+{
+ bzero(addr,bcount);
+}
+
+/*
+ * Object:
+ * bcopy EXPORTED function
+ *
+ * Memory copy
+ *
+ */
+void
+bcopy(
+ register vm_offset_t from,
+ register vm_offset_t to,
+ register unsigned bcount)
+{
+ register int i;
+
+ if ((from & 3) != (to & 3)) {
+		/* won't align easily */
+ while (bcount--)
+ *((char *) to++) = *((char *) from++);
+ return;
+ }
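+	/* same alignment trick as in bzero: the cases fall through */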
+ switch (to & 3) {
+ case 1:
+ *((char *) to++) = *((char *) from++);
+ if (--bcount == 0)
+ return;
+ case 2:
+ *((char *) to++) = *((char *) from++);
+ if (--bcount == 0)
+ return;
+ case 3:
+ *((char *) to++) = *((char *) from++);
+ if (--bcount == 0)
+ return;
+ default:
+ break;
+ }
+
+ /* XXX unroll */
+ for (i = bcount >> 2; i; i--, to += 4, from += 4)
+ *((int *) to) = *((int *) from);
+
+ switch (bcount & 3) {
+ case 3:
+ *((char *) to++) = *((char *) from++);
+ case 2:
+ *((char *) to++) = *((char *) from++);
+ case 1:
+ *((char *) to++) = *((char *) from++);
+ default:
+ break;
+ }
+}
+
+
+/*
+ * Object:
+ * ovbcopy EXPORTED function
+ *
+ * Overlapped byte copy
+ *
+ */
+ovbcopy(
+ register char *from,
+ register char *to,
+ register len)
+{
+ if (from < to) {
+ from += len;
+ to += len;
+ while (len--)
+ *--to = *--from;
+ } else {
+ while (len--)
+ *to++ = *from++;
+ }
+}
+
+
+/*
+ * Object:
+ * index EXPORTED function
+ *
+ * Find a character in a string
+ *
+ */
+const char *index(
+ register const char *str,
+ register char c)
+{
+ register char cc;
+
+ while (((cc = *str++) != c) && cc);
+
+ return (cc == c) ? str - 1 : (const char *) 0L;
+}
+
+
+/*
+ * Object:
+ * rindex EXPORTED function
+ *
+ * Find a character in a string, backwards
+ *
+ */
+char *rindex(str, c)
+ register char *str;
+ register char c;
+{
+ register char cc, *ccp = 0;
+
+ while (cc = *str++)
+ if (cc == c) ccp = str - 1;
+
+ return ccp;
+}
+
+/*
+ * Object:
+ * htonl EXPORTED function
+ *
+ * Host to network byte order conversion, long
+ *
+ */
+unsigned
+htonl(
+ register unsigned n)
+{
+ register unsigned tmp0, tmp1;
+
+ tmp0 = (n << 24) | (n >> 24);
+ tmp1 = (n & 0xff00) << 8;
+ tmp0 |= tmp1;
+ tmp1 = (n >> 8) & 0xff00;
+ return tmp0 | tmp1;
+}
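+/*
+ * Worked example: for n = 0x11223344,
+ *	(n << 24) | (n >> 24)	== 0x44000011
+ *	(n & 0xff00) << 8	== 0x00330000
+ *	(n >> 8) & 0xff00	== 0x00002200
+ * and or-ing the three yields 0x44332211, the byte-reversed value.
+ */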
+
+/*
+ * Object:
+ * ntohl EXPORTED function
+ *
+ *	Network to host byte order conversion, long
+ *
+ */
+unsigned ntohl(
+ register unsigned n)
+{
+ register unsigned tmp0, tmp1;
+
+ tmp0 = (n << 24) | (n >> 24);
+ tmp1 = (n & 0xff00) << 8;
+ tmp0 |= tmp1;
+ tmp1 = (n >> 8) & 0xff00;
+ return tmp0 | tmp1;
+}
+
diff --git a/alpha/alpha/alpha_misc.c b/alpha/alpha/alpha_misc.c
new file mode 100644
index 00000000..3c02dcd2
--- /dev/null
+++ b/alpha/alpha/alpha_misc.c
@@ -0,0 +1,360 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: alpha_misc.c,v $
+ * Revision 2.6 93/08/31 15:15:42 mrt
+ * To reboot one has to say the operator halted it.
+ * [93/08/25 af]
+ *
+ * Revision 2.5 93/05/15 19:11:01 mrt
+ * machparam.h -> machspl.h
+ *
+ * Revision 2.4 93/03/26 17:55:46 mrt
+ * Lint.
+ * [93/03/23 af]
+ *
+ * Revision 2.3 93/03/09 10:49:29 danner
+ * String protos.
+ * [93/03/07 13:31:07 af]
+ *
+ * Fixed reboot (almost), I had forgotten all about the HWRPB. Sigh.
+ * [93/02/19 af]
+ *
+ * Revision 2.2 93/02/05 07:57:30 danner
+ * 32bit-prom-bug support (jeffreyh).
+ * Sketch proper reboot.
+ * [93/02/04 00:47:40 af]
+ *
+ * Created.
+ * [92/06/02 af]
+ *
+ */
+/*
+ * File: alpha_misc.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/92
+ *
+ * Miscellaneous operations.
+ *
+ * This is a catchall file, put things here IFF they
+ * truly do not deserve a separate module.
+ */
+#include <cpus.h>
+
+#include <mach/std_types.h>
+#include <kern/strings.h>
+#include <machine/machspl.h> /* spl definitions */
+#include <alpha/alpha_cpu.h>
+#include <alpha/prom_interface.h>
+#include <sys/varargs.h>
+#include <vm/pmap.h>
+
+/*
+ * Object:
+ * halt EXPORTED function
+ *
+ * Halts the machine
+ *
+ */
+halt()
+{
+ prom_halt();
+}
+
+/*
+ * Object:
+ * halt_cpu EXPORTED function
+ *
+ * Halts the current cpu
+ *
+ */
+halt_cpu()
+{
+ (void) splhigh(); /* sanity */
+ alpha_reset_before_reboot();
+ halt();
+}
+
+/*
+ * Object:
+ * halt_all_cpus EXPORTED function
+ *
+ * Stop the machine, optionally reboot
+ *
+ *	Is this obsoleted by the shutdown thread or not?
+ */
+halt_all_cpus(reboot)
+ boolean_t reboot;
+{
+ if (reboot) {
+ printf("rebooting.... (transferring to monitor)\n\n");
+ prom_reboot();
+ }
+ (void) splhigh(); /* sanity */
+ halt_cpu();
+}
+
+#if (NCPUS > 1)
+
+#include <mach/processor_info.h>
+
+/*
+ * Object:
+ * cpu_start EXPORTED function
+ *
+ * Start a processor running
+ *
+ */
+kern_return_t
+cpu_start(processor_no)
+{
+ return KERN_SUCCESS;
+}
+
+/*
+ * Object:
+ * cpu_control EXPORTED function
+ *
+ * Play with a processor
+ *
+ */
+kern_return_t
+cpu_control(processor_num, info, count)
+ int processor_num;
+ processor_info_t info;
+ natural_t *count;
+{
+ return KERN_FAILURE;
+}
+
+/*
+ * Object:
+ * start_other_cpus EXPORTED function
+ *
+ */
+start_other_cpus()
+{
+ extern simple_lock_data_t slave_init_lock;
+
+ simple_unlock(&slave_init_lock);
+}
+
+
+/*
+ * Object:
+ * simple_lock_pause EXPORTED function
+ *
+ * Idle spinning on simple lock retry
+ *
+ */
+/* XXX should be adjusted per CPU speed */
+int simple_lock_pause_loop = 100;
+
+unsigned int simple_lock_pause_count = 0; /* debugging */
+
+void
+simple_lock_pause()
+{
+ static volatile int dummy;
+ int i;
+
+ simple_lock_pause_count++;
+
+ /*
+ * Used in loops that are trying to acquire locks out-of-order.
+ */
+
+ for (i = 0; i < simple_lock_pause_loop; i++)
+ dummy++; /* keep the compiler from optimizing the loop away */
+}
+
+switch_to_shutdown_context()
+{
+	gimmeabreak();	/* NYI */
+}
+
+#endif
+
+/*
+ * Object:
+ * getchar EXPORTED function
+ *
+ * Read a character from console
+ *
+ * Does echo, maps cr->newline
+ */
+getchar()
+{
+ register c;
+
+ c = cngetc();
+ if (c == '\r')
+ c = '\n';
+	if (c != -1)	/* lk201 hiccup */
+ cnputc(c);
+ return (c);
+}
+
+/*
+ * Object:
+ * pre_prom_putc EXPORTED function
+ *
+ * Remap char before passing off to prom
+ *
+ *	The prom only takes 32-bit addresses, so copy the char somewhere
+ *	the prom can find it.  This routine stops working once
+ *	pmap_rid_of_console is called in alpha_init, because the address
+ *	of the console area is hard-coded.
+ *
+ */
+
+pre_prom_putc(c)
+ unsigned char c;
+{
+ unsigned char *to = (unsigned char *)0x20000000;
+
+ if (c == '\n') {
+ strcpy ((char*)to,"\r\n");
+ prom_puts(alpha_console,to,2);
+ } else {
+ *to = c;
+ prom_puts(alpha_console,to,1);
+ }
+}
+
+alpha_con_putc(unit, line, c)
+{
+	pre_prom_putc(c);
+}
+
+/*
+ * Object:
+ * pre_prom_getc EXPORTED function
+ *
+ * Wait for keystroke
+ *
+ * Wait for the prom to get a real char and pass it back.
+ *
+ */
+
+unsigned char
+pre_prom_getc(wait,raw)
+ boolean_t wait;
+ boolean_t raw;
+
+{
+ unsigned long val;
+ int code;
+
+ do {
+ val = prom_getc(alpha_console);
+ code = val >> 61; /* the status is bits 63:61 */
+ } while (((code != 0) && (code != 1)) && wait); /* 0 = success
+ * 1 = success, more ready
+ * other are failures
+ */
+
+ return((unsigned char) val ); /* Pass back the char (lower 8 bits) */
+}
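+/*
+ * The prom_getc() return value packs a status/data pair into one
+ * 64-bit word: status in bits 63:61, the character in the low byte.
+ * Decoding it amounts to (a sketch; prom_interface.h's prom_return_t
+ * is the authoritative layout):
+ *
+ *	unsigned long val = prom_getc(alpha_console);
+ *	int code          = val >> 61;		0 or 1 means success
+ *	unsigned char ch  = val & 0xff;
+ */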
+
+int
+alpha_con_getc (unit, line,wait,raw)
+{
+ unsigned char c = pre_prom_getc(wait,raw);
+ return (c) ? c : -1;
+}
+
+
+/*
+ * Object:
+ * dprintf EXPORTED function
+ *
+ * Debugging printouts
+ *
+ * Like printf, ship characters to prom.
+ * Used before console line is properly configured,
+ * or out of despair.
+ *
+ */
+dprintf(fmt, va_alist)
+ char *fmt;
+ va_dcl
+{
+ va_list listp;
+
+ va_start(listp);
+ _doprnt(fmt, &listp, pre_prom_putc, 16);
+ va_end(listp);
+}
+
+/*
+ * Object:
+ * prom_reboot EXPORTED function
+ *
+ *	Reboots the system.
+ *
+ * NOTE: it would be nice to export from the kernel a way to
+ * specify which image to reboot to, but not all architectures
+ * can do it. Sigh.
+ */
+void
+prom_reboot()
+{
+ struct restart_blk *r;
+ struct per_cpu_slot *p;
+ int offset;
+
+ r = alpha_hwrpb;
+
+ offset = r->percpu_slot_size * cpu_number();
+ p = (struct per_cpu_slot *) ((char*)r + r->percpu_slots_offset +
+ offset);
+
+ (void) splhigh();
+ alpha_reset_before_reboot();
+
+ p->state_flags = PSTATE_H_WARM_BOOT | PSTATE_PL | PSTATE_OH;
+ wbflush();
+ halt();
+}
+
+/*
+ * Object:
+ * timemmap EXPORTED function
+ *
+ * Map the time info to user space
+ *
+ */
+#include <mach/vm_prot.h>
+
+timemmap(dev,off,prot)
+ vm_prot_t prot;
+{
+ extern int *mtime;
+
+ if (prot & VM_PROT_WRITE)
+ return (-1);
+
+ return (alpha_btop(pmap_extract(pmap_kernel(), mtime)));
+}
+
diff --git a/alpha/alpha/alpha_scb.c b/alpha/alpha/alpha_scb.c
new file mode 100644
index 00000000..1d5d50ae
--- /dev/null
+++ b/alpha/alpha/alpha_scb.c
@@ -0,0 +1,88 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: alpha_scb.c,v $
+ * Revision 2.2 93/01/14 17:11:32 danner
+ * Added reference to documentation source(s).
+ * [92/12/16 15:11:48 af]
+ *
+ * Created.
+ * [92/06/03 af]
+ *
+ */
+
+/*
+ * File: alpha_scb.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/92
+ *
+ * Alpha System Control Block (exception and interrupt dispatch)
+ *
+ * This code was derived exclusively from information available in
+ * "Alpha Architecture Reference Manual", Richard L. Sites ed.
+ * Digital Press, Burlington, MA 01803
+ * ISBN 1-55558-098-X, Order no. EY-L520E-DP
+ */
+
+#include <mach/std_types.h>
+#include <alpha/alpha_scb.h>
+
+/*
+ * We only play with interrupt vectors
+ */
+struct scb_entry {
+ void (*dispatcher_routine)( );
+ void (*service_routine)( );
+};
+
+extern void TRAP_interrupt( );
+extern void stray_interrupt( );
+
+extern struct scb_entry alpha_scb[N_SCB_ENTRIES];
+
+boolean_t
+alpha_set_scb_entry( unsigned int entry_no, void (*routine)())
+{
+ register struct scb_entry *e;
+
+ e = &alpha_scb[entry_no];
+
+ /* sanity checks */
+ if ((e < &alpha_scb[N_SCB_ENTRIES]) &&
+ (e->dispatcher_routine == TRAP_interrupt)) {
+ alpha_scb[entry_no].service_routine = routine;
+ return TRUE;
+ }
+ panic("set_scb_entry");
+ return FALSE;
+}
+
+boolean_t
+alpha_clear_scb_entry( unsigned int entry_no)
+{
+ return alpha_set_scb_entry( entry_no, stray_interrupt);
+}
diff --git a/alpha/alpha/alpha_scb.h b/alpha/alpha/alpha_scb.h
new file mode 100644
index 00000000..53211af8
--- /dev/null
+++ b/alpha/alpha/alpha_scb.h
@@ -0,0 +1,67 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: alpha_scb.h,v $
+ * Revision 2.2 93/01/14 17:11:36 danner
+ * Added reference to documentation source(s).
+ * [92/12/16 15:11:38 af]
+ *
+ * Created.
+ * [92/06/03 af]
+ *
+ */
+
+/*
+ * File: alpha_scb.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/92
+ *
+ * Alpha System Control Block, dynamically settable entries
+ *
+ * This code was derived exclusively from information available in
+ * "Alpha Architecture Reference Manual", Richard L. Sites ed.
+ * Digital Press, Burlington, MA 01803
+ * ISBN 1-55558-098-X, Order no. EY-L520E-DP
+ */
+
+#define SCB_SOFTCLOCK 81
+#define SCB_CLOCK 96
+#define SCB_INTERPROC 97
+
+#define SCB_INTERRUPT_FIRST 128
+#define SCB_MAX_INTERRUPTS 512
+
+#define N_SCB_ENTRIES (128+512)
+
+#ifndef ASSEMBLER
+
+extern boolean_t alpha_set_scb_entry( unsigned int entry_no,
+ void (*routine)() );
+
+extern boolean_t alpha_clear_scb_entry( unsigned int entry_no);
+
+#endif /* ! ASSEMBLER */
diff --git a/alpha/alpha/alpha_startup.c b/alpha/alpha/alpha_startup.c
new file mode 100644
index 00000000..a19096a9
--- /dev/null
+++ b/alpha/alpha/alpha_startup.c
@@ -0,0 +1,66 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: alpha_startup.c,v $
+ * Revision 2.2 93/01/14 17:11:39 danner
+ * Created.
+ * [92/06/05 af]
+ *
+ */
+/*
+ * File: alpha_startup.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/92
+ *
+ * Machine dependent startup code.
+ */
+
+#include <mach/vm_param.h>
+
+/*
+ * Machine-dependent startup code.
+ */
+machine_startup()
+{
+ extern char version[];
+
+ /*
+ * Initialization message print.
+ */
+ printf(version);
+#define KBYTE 1024
+#define MEG (KBYTE*KBYTE)
+ printf("memory: %d.%d%d megabytes.\n", mem_size / MEG,
+ ((mem_size % MEG) * 10) / MEG,
+ ((mem_size % (MEG / 10)) * 100) / MEG);
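+	/*
+	 * The two modulus expressions above extract the first and
+	 * second decimal digits of the fractional megabyte count;
+	 * e.g. mem_size == 5*MEG/4 prints "1.25 megabytes."
+	 */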
+
+ /*
+ * Start the system up
+ */
+ setup_main();
+}
+
diff --git a/alpha/alpha/ast.h b/alpha/alpha/ast.h
new file mode 100644
index 00000000..b477e389
--- /dev/null
+++ b/alpha/alpha/ast.h
@@ -0,0 +1,45 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: ast.h,v $
+ * Revision 2.2 93/01/14 17:11:43 danner
+ * Created.
+ * [92/05/31 af]
+ *
+ */
+/*
+ * File: ast.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 05/92
+ *
+ * Machine-dependent AST file for ALPHA.
+ */
+
+#ifndef _ALPHA_AST_H_
+#define _ALPHA_AST_H_
+
+#endif /* _ALPHA_AST_H_ */
diff --git a/alpha/alpha/ast_check.c b/alpha/alpha/ast_check.c
new file mode 100644
index 00000000..9471d6c9
--- /dev/null
+++ b/alpha/alpha/ast_check.c
@@ -0,0 +1,69 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * HISTORY
+ * $Log: ast_check.c,v $
+ * Revision 2.2 93/01/14 17:11:46 danner
+ * Copied from dbg's i386 file.
+ * [92/11/20 af]
+ *
+ */
+#include <cpus.h>
+
+#if NCPUS > 1
+
+/*
+ * Handle signalling ASTs on other processors.
+ *
+ * The initial implementation, copied from i386, does nothing.
+ */
+
+#include <kern/processor.h>
+
+/*
+ * Initialize for remote invocation of ast_check.
+ */
+init_ast_check(processor)
+ processor_t processor;
+{
+#ifdef lint
+ processor++;
+#endif /* lint */
+}
+
+/*
+ * Cause remote invocation of ast_check. Caller is at splsched().
+ */
+cause_ast_check(processor)
+ processor_t processor;
+{
+#ifdef lint
+ processor++;
+#endif /* lint */
+}
+
+#endif /* NCPUS > 1 */
diff --git a/alpha/alpha/ast_types.h b/alpha/alpha/ast_types.h
new file mode 100644
index 00000000..1cf5398b
--- /dev/null
+++ b/alpha/alpha/ast_types.h
@@ -0,0 +1,51 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: ast_types.h,v $
+ * Revision 2.2 93/01/14 17:11:52 danner
+ * Created.
+ * [92/12/10 af]
+ *
+ */
+/*
+ * File: ast_types.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 11/92
+ *
+ * Machine-dependent definitions for the AST mechanisms
+ */
+
+#ifndef _ALPHA_AST_TYPES_H_
+#define _ALPHA_AST_TYPES_H_
+
+/*
+ * Data type for remote ast_check() invocation support. Currently
+ * not implemented. Do this first to avoid include problems.
+ */
+typedef integer_t ast_check_t;
+
+#endif /* _ALPHA_AST_TYPES_H_ */
diff --git a/alpha/alpha/autoconf.c b/alpha/alpha/autoconf.c
new file mode 100644
index 00000000..414aedb8
--- /dev/null
+++ b/alpha/alpha/autoconf.c
@@ -0,0 +1,317 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: autoconf.c,v $
+ * Revision 2.4 93/08/10 15:15:34 mrt
+ * The answer to the puzzle below came in, and it reads:
+ * "..all the systems implemented the fields backwards, so that
+ * MAJOR and MINOR were switched. The SRM was changed to match.."
+ * [93/08/06 af]
+ *
+ * An FRU on my Flamingo brought by new codes for the processor
+ * type.
+ * [93/08/05 af]
+ *
+ * Revision 2.3 93/01/19 08:29:08 danner
+ * Added reference to doc for the HWRPB &co.
+ * [93/01/19 af]
+ *
+ * Revision 2.2 93/01/14 17:11:58 danner
+ * Added reference to documentation source(s).
+ * [92/12/16 15:11:55 af]
+ *
+ * Created.
+ * [92/12/10 14:53:51 af]
+ *
+ */
+
+/*
+ * File: autoconf.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/92
+ *
+ * Setup the system to run on the current machine.
+ *
+ * This code was derived exclusively from information available in
+ * "Alpha Architecture Reference Manual", Richard L. Sites ed.
+ * Digital Press, Burlington, MA 01803
+ * ISBN 1-55558-098-X, Order no. EY-L520E-DP
+ *
+ * "VMS for Alpha Platforms Internals and Data Structures"
+ * Digital Press 1992, Burlington, MA 01803
+ * Order number EY-L466E-P1/2, ISBN 1-55558-095-5
+ * [Especially volume 1, chapter 33 "Bootstrap processing"]
+ */
+
+#include <platforms.h>
+#include <cpus.h>
+
+#include <mach/std_types.h>
+#include <sys/types.h>
+#include <chips/busses.h>
+
+#include <mach/machine.h>
+#include <kern/cpu_number.h>
+#include <kern/zalloc.h>
+
+#include <alpha/thread.h>
+#include <alpha/alpha_cpu.h>
+#include <alpha/prom_interface.h>
+
+boolean_t cold; /* flag we're configuring */
+
+/*
+ * Get cpu type, and then switch out to machine specific procedures
+ * which will probe adaptors to see what is out there.
+ */
+machine_init()
+{
+#if NCPUS > 1
+ /*
+ * See how many cpus we got
+ */
+ extern int processors_running[];
+ int i;
+
+ for (i = 0; i < NCPUS; i++)
+ if (processors_running[i]) machine_slot[i].is_cpu = TRUE;
+#endif
+
+ cold = 1;
+
+ /*
+ * Say what kind of cpu chip is the primary
+ */
+ identify_cpu(master_cpu);
+
+ /*
+ * Find out what sort of machine we are on.
+ */
+ identify_alphabox();
+
+ /*
+ * Now for peripherals.
+ * Note that console is already up by now.
+ */
+ identify_devices();
+
+ /*
+ * Find root device, if different from boot
+ */
+ get_root_name();
+
+ /*
+ * All done.
+ */
+ cold = 0;
+}
+
+
+void slave_machine_init()
+{
+ identify_cpu(cpu_number());
+}
+
+
+/*
+ * Device probing procedure.
+ */
+
+identify_devices()
+{
+	/* Each bus should have its own prober (a la tc_config()),
+	   which is really what gets searched when we look for the
+	   console.  Here we should just call configure_bus_master
+	   on the bus descriptors of all the possible busses. */
+
+ register struct bus_ctlr *bus = bus_master_init;
+
+ while (bus->driver) {
+ if ((bus->flags & BUS_CTLR) &&
+ ((*bus->driver->probe)(0,bus) != 0)) {
+ printf("Attaching %s%d:\n", bus->name, bus->unit);
+ (*bus->driver->attach)((struct bus_device *)bus);
+ }
+ bus++;
+ }
+}
+
+/*
+ * Say which machine
+ */
+identify_alphabox()
+{
+# define _MAX_KNOWN_ST_ 6
+ static char *system_types[_MAX_KNOWN_ST_+1] = {
+ "Undefined", "ADU", "Cobra", "Ruby", "Flamingo",
+ "Mannequin", "Jensen"
+ };
+
+ /*
+ * Sanity checks
+ */
+ if (alpha_hwrpb->my_version > 3)
+ printf("Warning: HWRPB version %d might be incompatible\n",
+ alpha_hwrpb->my_version);
+#if NCPUS > 1
+ if (alpha_hwrpb->primary_cpu_id != cpu_number())
+ printf("Mumble.. primary cpu %d not master %d ?\n",
+ alpha_hwrpb->primary_cpu_id, cpu_number());
+#endif
+
+ /*
+ * System data
+ */
+ {
+ char *box;
+ char *ssn = alpha_hwrpb->system_serial_number;
+ char *rev = alpha_hwrpb->system_revision;
+
+ if (alpha_hwrpb->system_type > _MAX_KNOWN_ST_)
+ box = "New/Unknown System";
+ else
+ box = system_types[alpha_hwrpb->system_type];
+
+ printf("box: %s, Revision %c%c%c%c, Variation %x,",
+ box, rev[0], rev[1], rev[2], rev[3],
+ alpha_hwrpb->system_variation);
+
+ printf(" Serial Number %c%c%c%c%c%c%c%c%c%c\n",
+ ssn[0], ssn[1], ssn[2], ssn[3], ssn[4],
+ ssn[5], ssn[6], ssn[7], ssn[8], ssn[9]);
+ }
+
+ /*
+ * Clock and timings
+ */
+ {
+ extern int hz, tick;
+
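+		/*
+		 * The HWRPB stores the interval-clock frequency
+		 * scaled by 4096 (interrupts/sec times 4096),
+		 * hence the division here.
+		 */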
+ hz = alpha_hwrpb->clock_interrupt_frequency / 4096;
+ tick = 1000000 / hz;
+ printf("clk: %d cycles/sec. HZ is %d intr/sec.\n",
+ alpha_hwrpb->cycle_counter_resolution, hz);
+ }
+
+}
+
+/* Here we truly want our hw slot */
+#ifdef cpu_number
+#undef cpu_number
+#endif
+/*
+ * Say what cpu we've got
+ */
+identify_cpu(self)
+ int self;
+{
+ struct per_cpu_slot *pcpu;
+ unsigned int slotno = cpu_number();
+
+# define _MAX_KNOWN_CT_ 3
+ static char *cpu_names[_MAX_KNOWN_CT_+1] = {
+ "Unknown", "EV3", "EV4", "ISP"
+ };
+
+/* if (alpha_hwrpb->system_variation & SYSTEM_VAR_MPCAP) broken */
+#if (NCPUS>1)
+ printf("%s cpu %d: ",
+ (self == master_cpu) ? "Primary" : "Secondary",
+ slotno);
+#else
+ printf("cpu: ");
+#endif
+
+ pcpu = (struct per_cpu_slot *)
+ ((vm_offset_t)alpha_hwrpb + alpha_hwrpb->percpu_slots_offset +
+ (slotno * alpha_hwrpb->percpu_slot_size));
+
+ machine_slot[self].is_cpu = TRUE;
+ machine_slot[self].cpu_type = CPU_TYPE_ALPHA;
+ machine_slot[self].cpu_subtype = pcpu->processor_major_type;
+ machine_slot[self].running = TRUE;
+ machine_slot[self].clock_freq = /* hz incorrect for now */
+ alpha_hwrpb->clock_interrupt_frequency / 4096;
+
+ /*
+ * Say about the processor itself
+ */
+ {
+ char *cpuname, *pass, *rev, *ssn;
+
+ if (pcpu->processor_major_type > _MAX_KNOWN_CT_)
+ cpuname = "New/Unknown";
+ else
+ cpuname = cpu_names[pcpu->processor_major_type];
+
+ switch (pcpu->processor_minor_type) {
+ case 0: pass = " Pass 2"; break;
+ case 1: pass = " Pass 3"; break;
+ default: pass = ""; break;
+ }
+
+ rev = pcpu->processor_revision;
+
+ ssn = pcpu->processor_serial_number;
+
+ printf("Alpha %s%s Processor, Variation %x, ",
+ cpuname, pass, /* pcpu->state_flags, */
+ pcpu->processor_variation);
+
+ printf("Revision %c%c%c%c, Serial Number %c%c%c%c%c%c%c%c%c%c\n",
+ rev[0], rev[1], rev[2], rev[3],
+ ssn[0], ssn[1], ssn[2], ssn[3], ssn[4],
+ ssn[5], ssn[6], ssn[7], ssn[8], ssn[9]);
+#if 0
+EV3 has 1k caches, EV4 has 8k, 21064 has 8k
+#endif
+ }
+
+ /*
+ * Say about the PAL code
+ */
+ {
+ union {
+ struct {
+ unsigned char minor_no;
+ unsigned char major_no;
+ unsigned char type;
+ unsigned char mbz;
+ unsigned short compatibility;
+ unsigned short max_share;
+ } info;
+ natural_t bits;
+ } palrev;
+
+ palrev.bits = pcpu->palcode_revision_info;
+ printf("pal: at slot %d Rev %d.%d, Type %d, Compat %d, MP %x\n",
+ slotno, palrev.info.major_no, palrev.info.minor_no,
+ palrev.info.type, palrev.info.compatibility,
+ palrev.info.max_share);
+ }
+}
+
diff --git a/alpha/alpha/c_misc.c b/alpha/alpha/c_misc.c
new file mode 100644
index 00000000..32b2a9ff
--- /dev/null
+++ b/alpha/alpha/c_misc.c
@@ -0,0 +1,421 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: c_misc.c,v $
+ * Revision 2.3 93/03/09 10:49:35 danner
+ * Prom dispatching, protos, now can ask for file and read labels.
+ * Which means booting from any partition. Also does a.out images.
+ * [93/03/05 af]
+ *
+ * Revision 2.2 93/02/05 08:00:26 danner
+ * Created a while back.
+ * [93/02/04 af]
+ *
+ */
+/*
+ * File: c_misc.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 12/90
+ *
+ * Miscellaneous C code.
+ */
+
+#include <mach/std_types.h>
+
+#include <alpha/coff.h>
+#include <alpha/exec.h>
+#include <alpha/syms.h>
+#include "ufs.h"
+#include "min_io.h"
+
+#include "dev.h"
+#include "../../device/disk_status.h"
+#include "../../scsi/rz_labels.h"
+
+
+/*
+ * Prom callback setup.
+ */
+#include "prom_routines.h"
+
+int console;
+
+void
+init_prom_calls()
+{
+ struct restart_blk *r;
+ struct console_routine_blk *c;
+ vm_offset_t addr;
+
+ r = (struct restart_blk *)RESTART_ADDR;
+
+ c = (struct console_routine_blk *)
+ ((char*)r + r->console_routine_block_offset);
+
+ prom_dispatch_v.routine_arg = c->dispatch_func_desc;
+ prom_dispatch_v.routine = c->dispatch_func_desc->code;
+
+ /*
+ * Now look for console tty
+ */
+ {
+ char buf[4];
+ prom_getenv( PROM_E_TTY_DEV, buf, 4);
+ console = buf[0] - '0';
+ }
+
+ /*
+ * Turn on superpage
+ */
+ {
+ struct per_cpu_slot *p;
+ int offset;
+
+ offset = r->percpu_slot_size * cpu_number();
+ p = (struct per_cpu_slot *) ((char*)r +
+ r->percpu_slots_offset +
+ offset);
+ addr = enable_suppage(p->palcode_memory);
+ }
+}
+
+/*
+ * Utilities
+ * Note that SIZE is of the essence here, not speed
+ */
+bcopy(from, to, bcount)
+ register char *from, *to;
+ register bcount;
+{
+ if (from < to) {
+ from += bcount;
+ to += bcount;
+ while (bcount-- > 0)
+ *--to = *--from;
+ } else {
+ while (bcount-- > 0)
+ *to++ = *from++;
+ }
+}
+
+bzero(to, bcount)
+ register char *to;
+ register bcount;
+{
+ while (bcount-- > 0)
+ *to++ = 0;
+}
+
+
+/*
+ * Dynamic memory allocator
+ */
+struct fl {
+ struct fl *next;
+ int size;
+} *freelist = 0;
+extern char end[];
+char *top = end;
+
+vm_allocate(x, ptr, size)
+ struct fl **ptr;
+{
+ register struct fl *f = freelist,
+ *prev;
+
+ prev = (struct fl *)&freelist;
+ while (f && f->size < size){
+ prev = f;
+ f = f->next;
+ }
+ if (f == 0) {
+ f = (struct fl*)top;
+ top += (size + 7) & ~7;
+ } else
+ prev->next = f->next;
+ *ptr = f;
+ bzero(f, size);
+ return 0;
+}
+
+vm_deallocate(x, ptr, size)
+ struct fl *ptr;
+{
+ ptr->size = size;
+ ptr->next = freelist;
+ freelist = ptr;
+ return 0;
+}
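+
+/*
+ * The above is a deliberately tiny first-fit allocator for the
+ * standalone boot code: freed blocks are pushed on `freelist' and
+ * never coalesced, and fresh memory is carved off `top', which
+ * starts at the image's `end' symbol and grows by 8-byte-aligned
+ * chunks.
+ */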
+
+/*
+ * Character subroutines
+ */
+
+unsigned long
+strlen( register const char *str )
+{
+ register const char *s = str;
+
+ while (*str++);
+
+ return (str - s - 1);
+}
+
+/*
+ * Compare strings: s1>s2: >0 s1==s2: 0 s1<s2: <0
+ */
+
+strcmp(s1, s2)
+register const char *s1, *s2;
+{
+ while (*s1 == *s2++)
+ if (*s1++=='\0')
+ return(0);
+ return(*s1 - *--s2);
+}
+
+void putnum( unsigned long int n)
+{
+ char digits[24], *p = &digits[23];
+
+ while (n) {
+ *p-- = ((n & 0xf) > 9) ?
+ ((n & 0xf) + 'a'-10)
+ : ((n & 0xf) + '0');
+ n >>= 4;
+ }
+ *p = '0';
+ prom_puts(console, p, &digits[24] - p);
+}
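+/*
+ * Note that putnum() prints in hexadecimal, and that the final
+ * "*p = '0'" both handles n == 0 and leaves a leading '0' on
+ * every number (255 comes out as "0ff").
+ */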
+
+
+#ifdef INTERACTIVE
+
+int gets( unsigned char *buf )
+{
+ register unsigned int c;
+ register unsigned long ret;
+ register unsigned char *cp = buf;
+
+ do {
+ ret = prom_getc(console);
+ c = ret & 0xff;
+ ret >>= 61;
+ if ((ret == 0) || (ret == 1)) {
+ *cp = c;
+ prom_puts(console, cp++, 1);
+ }
+ } while (c != '\r');
+ *cp = '\n';
+ prom_puts(console, cp, 1);
+ cp[-1] = 0; /* Squash CR-LF */
+ return (cp - 1) - buf;
+}
+
+#endif /* INTERACTIVE */
+
+/*
+ * Load and transfer control to executable file image
+ */
+void
+aout_exec( open_file_t file,
+ struct exec *hdr,
+ long arg0)
+{
+ /* lets not kid ourselves */
+	/* let's not kid ourselves */
+
+ /* text, all there is */
+ lseek( file, sizeof(*hdr), 0);
+ putnum(hdr->a_text);
+ if (read(file, (char *)hdr->a_tstart, hdr->a_text) != hdr->a_text)
+ return;
+
+ /* if data */
+ if (hdr->a_data) {
+ puts("+");
+ putnum(hdr->a_data);
+ if (read(file, (char *)hdr->a_dstart, hdr->a_data) != hdr->a_data)
+ return;
+ }
+
+ /* if bss */
+ if (hdr->a_bss) {
+ puts("+");
+ putnum(hdr->a_bss);
+ bzero(hdr->a_dstart + hdr->a_data, hdr->a_bss);
+ }
+
+ puts(" start ");
+ putnum(hdr->a_entry);
+ puts("\r\n");
+ (*((int (*) ()) hdr->a_entry)) (arg0);
+}
+
+void
+exec( open_file_t file,
+ long arg0)
+{
+ union {
+ struct exechdr coff;
+ struct exec aout;
+ } hdr;
+ register struct exechdr *h;
+ register int symsize, rsize;
+ HDRR *symptr;
+
+ /*
+ * Read in and check header
+ */
+ if (read(file, (char *)&hdr, sizeof(hdr)) != sizeof(hdr))
+ return;
+
+ if (N_BADMAG(hdr.coff.a)) {
+ aout_exec(file, &hdr.aout, arg0);
+ return;
+ }
+
+ /*
+ * Text
+ */
+ lseek(file, N_TXTOFF(hdr.coff.f, hdr.coff.a), 0);
+ putnum(hdr.coff.a.tsize);
+ if (read(file, (char *)hdr.coff.a.text_start, hdr.coff.a.tsize) != hdr.coff.a.tsize) {
+ return;
+ }
+
+ /*
+ * Data
+ */
+ puts("+");
+ putnum(hdr.coff.a.dsize);
+ if (read(file, (char *)hdr.coff.a.data_start, hdr.coff.a.dsize) != hdr.coff.a.dsize) {
+ return;
+ }
+
+ /*
+ * Bss
+ */
+ puts("+");
+ putnum(hdr.coff.a.bsize);
+ bzero(hdr.coff.a.bss_start, hdr.coff.a.bsize);
+
+ /*
+ * Symbol table is loaded by cheating on data segment size
+ */
+
+ /*
+ * Entry and go
+ */
+ puts(" start ");
+ putnum(hdr.coff.a.entry);
+ puts("\r\n");
+ (*((int (*) ()) hdr.coff.a.entry)) (arg0);
+}
+
+/*
+ * Device interface (between filesystems and PROM)
+ */
+
+boot_device_open(dev)
+ struct dev_t *dev;
+{
+ unsigned char devname[32];
+ struct disklabel *l;
+ vm_size_t resid;
+ prom_return_t ret;
+ int devlen;
+
+ ret.bits = prom_getenv( PROM_E_BOOTED_DEV, devname, sizeof devname);
+ devlen = ret.u.retval;
+
+ ret.bits = prom_open(devname, devlen);
+ if (ret.u.status)
+ return -1;
+
+ dev->handle = ret.u.retval;
+
+ /* partitioning */
+ dev->first_block = 0;
+ dev->last_block = -1;
+
+#if USE_LABEL
+ device_read(dev, 0, 512, &l, &resid);
+
+ l = (struct disklabel*)((char*)l + LABELOFFSET);
+ if (l->d_magic == DISKMAGIC) {
+ register unsigned char *cp;
+ int c;
+
+ /* Another shot of genius from the DEC folks. Now
+ the syntax of boot devices is per-system type.
+ This code assumes something like
+ "<junk> upp N <syntax qualifier>"
+ <syntax qualifier> ::= non-blanks
+ N ::= single digit
+ pp ::= partition, 2 digits, we take the second
+ u ::= unit number
+ junk ::= anything
+ */
+
+ cp = &devname[devlen];
+ while (*(--cp) != ' ') ;
+ while (*(--cp) != ' ') ;
+ c = cp[-1] - '0';
+
+ if (c < 0 || c >= l->d_npartitions)
+ return 0;
+
+ dev->first_block = l->d_partitions[c].p_offset;
+ dev->last_block = dev->first_block + l->d_partitions[c].p_size;
+ }
+#endif /* USE_LABEL */
+ return 0;
+}
+
+device_close(dev)
+ struct dev_t *dev;
+{
+ dev->first_block = -1;
+ return prom_close(dev->handle);
+}
+
+device_read(dev, block, size, ptr, rsize)
+ struct dev_t *dev;
+ char **ptr; /* out */
+ vm_size_t *rsize; /* out */
+{
+ prom_return_t ret;
+
+ if (block < dev->first_block || block > dev->last_block)
+ return FS_INVALID_PARAMETER;
+ vm_allocate(0, ptr, size);
+ ret.bits = prom_read( dev->handle, size, *ptr, block);
+ *rsize = ret.u.retval;
+ return ret.u.status;
+}
diff --git a/alpha/alpha/clock.c b/alpha/alpha/clock.c
new file mode 100644
index 00000000..9eeac5a4
--- /dev/null
+++ b/alpha/alpha/clock.c
@@ -0,0 +1,84 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: clock.c,v $
+ * Revision 2.2 93/01/14 17:12:08 danner
+ * Added reference to documentation source(s).
+ * [92/12/16 15:12:06 af]
+ *
+ * Created.
+ * [92/06/03 af]
+ *
+ */
+
+/*
+ *	File:	clock.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/92
+ *
+ * Alpha System Clock handler
+ *
+ * This code was derived exclusively from information available in
+ * "Alpha Architecture Reference Manual", Richard L. Sites ed.
+ * Digital Press, Burlington, MA 01803
+ * ISBN 1-55558-098-X, Order no. EY-L520E-DP
+ */
+
+#include <mach/std_types.h>
+
+#include <alpha/alpha_cpu.h>
+#include <alpha/alpha_scb.h>
+
+extern void softclock();
+
+void alpha_clock(
+ struct alpha_saved_state *ss_ptr,
+ natural_t r4,
+ natural_t r5,
+ natural_t cause)
+{
+ register natural_t ps = ss_ptr->framep->saved_ps;
+ extern int tick;
+
+ clock_interrupt( (integer_t)tick,
+ alpha_user_mode(ps),
+ (ps & PS_IPL_MASK) == 0);
+}
+
+startrtclock()
+{
+	alpha_set_scb_entry( SCB_CLOCK, alpha_clock);
+	alpha_set_scb_entry( SCB_SOFTCLOCK, softclock);
+}
+
+stopclocks()
+{
+	alpha_clear_scb_entry( SCB_CLOCK );
+}
+
+resettodr()
+{
+}
+
diff --git a/alpha/alpha/clock.h b/alpha/alpha/clock.h
new file mode 100644
index 00000000..db9ffa05
--- /dev/null
+++ b/alpha/alpha/clock.h
@@ -0,0 +1,73 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: clock.h,v $
+ * Revision 2.2 93/03/10 09:03:38 danner
+ * Copied from mips.
+ * [93/03/09 18:10:16 af]
+ *
+ * Revision 2.4 91/05/14 17:32:37 mrt
+ * Correcting copyright
+ *
+ * Revision 2.3 91/02/05 17:47:15 mrt
+ * Added author notices
+ * [91/02/04 11:21:14 mrt]
+ *
+ * Changed to use new Mach copyright
+ * [91/02/02 12:24:48 mrt]
+ *
+ * Revision 2.2 89/11/29 14:12:42 af
+ * Adapted for pure kernel, removed BSD remnants.
+ * [89/10/29 af]
+ *
+ * Revision 2.1 89/05/30 12:55:27 rvb
+ * Created.
+ *
+ * 4-Jan-89 Alessandro Forin (af) at Carnegie-Mellon University
+ * Created.
+ */
+
+/*
+ * File: clock.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 1/89
+ *
+ * General definitions for clock devices
+ *
+ */
+
+/*
+ * General time definitions
+ */
+#define SECMIN ((unsigned)60) /* seconds per minute */
+#define SECHOUR ((unsigned)(60*SECMIN)) /* seconds per hour */
+#define SECDAY ((unsigned)(24*SECHOUR)) /* seconds per day */
+#define SECYR ((unsigned)(365*SECDAY)) /* sec per reg year */
+
+#define YRREF 1970
+#define	LEAPYEAR(x)	(((x) % 4) == 0)	/* valid for 1901..2099 */
+
diff --git a/alpha/alpha/context.S b/alpha/alpha/context.S
new file mode 100644
index 00000000..7aba0248
--- /dev/null
+++ b/alpha/alpha/context.S
@@ -0,0 +1,476 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: context.s,v $
+ * Revision 2.2 93/02/05 07:57:44 danner
+ * Change all mov inst to or inst
+ * [93/01/12 jeffreyh]
+ * Added reference to documentation source(s).
+ * [92/12/16 15:12:29 af]
+ *
+ * Created.
+ * [92/12/10 14:55:23 af]
+ *
+ *
+ */
+/*
+ * File: context.s
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/92
+ *
+ * Context switching and context save/restore primitives
+ *
+ * This code was derived exclusively from information available in
+ * "Alpha Architecture Reference Manual", Richard L. Sites ed.
+ * Digital Press, Burlington, MA 01803
+ * ISBN 1-55558-098-X, Order no. EY-L520E-DP
+ */
+
+#include <cpus.h>
+
+
+#include <mach/alpha/asm.h>
+#include <mach/alpha/alpha_instruction.h>
+
+#include <alpha/alpha_cpu.h>
+#include <mach/alpha/vm_param.h>
+#include <alpha/pmap.h>
+#include <alpha/thread.h>
+#include <alpha/context.h>
+
+#include <assym.s>
+
+ .set noreorder /* unless overridden */
+
+/*
+ * Object:
+ * active_threads IMPORTED variable
+ * active_stacks IMPORTED variable
+ * current_dispatch EXPORTED variable
+ *
+ * - Pointer to current thread
+ * - Pointer to current kernel stack
+ * - Pointer to emulation dispatch structure for same
+ */
+BSS(active_threads,8*NCPUS)
+BSS(active_stacks,8*NCPUS)
+BSS(current_dispatch,8*NCPUS)
+
+/*
+ * Object:
+ * load_context EXPORTED function
+ *
+ * Load the state of the target thread
+ *
+ * Arguments:
+ * a0 thread_t
+ *
+ * Installs this as the current thread on all the various
+ * globals: active_threads, active_stacks
+ * Used to start the first thread on a CPU.
+ * Assumes splsched.
+ * Only used at boot time, and never again. So who cares.
+ */
+LEAF(load_context,1)
+ ldgp gp,0(pv)
+
+#if (NCPUS>1)
+ call_pal op_mfpr_whami
+ /*
+ * If we are a secondary we can release the
+ * boot stack now and let the next one go
+ */
+ IMPORT(master_cpu,4)
+ ldl s0,master_cpu
+ subl s0,v0,s0
+ beq s0,1f
+ IMPORT(slave_init_lock,8)
+ lda s0,slave_init_lock
+ stq zero,0(s0)
+1:
+#else
+ or zero,zero,v0
+#endif
+
+ /*
+ * Part 1: Load registers
+ */
+ ldq s0,THREAD_KERNEL_STACK(a0) /* get kernel stack */
+ ldq s1,THREAD_PCB(a0) /* PCB pointer */
+
+ /*
+ * Part 2: Critical section
+ *
+ * Setup important global variables.
+ * Relevant globals are bracketed in comments
+ *
+ */
+
+ lda t0,active_threads
+ s8addq v0,t0,t0
+ stq a0,0(t0) /* >>active_threads<< */
+
+ lda t0,active_stacks
+ s8addq v0,t0,t0
+ stq s0,0(t0) /* >>active_stacks<< */
+
+ lda sp,KERNEL_STACK_SIZE-MSB_SIZE-MEL_SIZE-MKS_SIZE(s0)
+
+ lda t0,KERNEL_STACK_SIZE-MSB_SIZE-TF_SIZE(s0)
+ stq t0,MSS_FRAMEP(s1)
+
+ stq s1,KERNEL_STACK_SIZE-MSB_SIZE+MSB_PCB(s0)
+
+ /* Do not swpctxt here, was done before calling */
+
+ ldq ra,MKS_PC(sp)
+ ldq sp,MKS_SP(sp)
+
+ or zero,zero,a0 /* return zero to thread_continue */
+ or ra,zero,pv /* enter/return issue */
+ ret zero,(ra)
+
+ END(load_context)
+
+/*
+ * Object:
+ * Switch_context EXPORTED function
+ *
+ * Save state of the current thread,
+ * and resume new thread,
+ * returning the current thread.
+ *
+ * Arguments:
+ * a0 old thread_t
+ * a1 function
+ * a2 new thread_t
+ *
+ * Installs this as the current thread on all the various
+ * globals: active_threads.
+ * Assumes interrupts are disabled.
+ */
+LEAF(Switch_context,3)
+ ldgp gp,0(pv)
+
+#if (NCPUS>1)
+ call_pal op_mfpr_whami
+#else
+ or zero,zero,v0
+#endif
+ lda t0,active_stacks
+ s8addq v0,t0,t0
+ ldq t0,0(t0) /* get old kernel stack */
+ stq a1,THREAD_SWAP_FUNC(a0)
+ stq t0,THREAD_KERNEL_STACK(a0)
+ bne a1,1f
+
+ /*
+ * Part 1: Save context
+ *
+ * We only need to save those registers that are callee-saved
+ * in C, everything else is already on the stack.
+ * We don't need to do this if there is an explicit continuation.
+ */
+#define MKS_OFFSET (KERNEL_STACK_SIZE-MSB_SIZE-MEL_SIZE-MKS_SIZE)
+ stq sp,MKS_OFFSET+MKS_SP(t0)
+ stq ra,MKS_OFFSET+MKS_PC(t0)
+
+ stq s0,MKS_OFFSET+MKS_S0(t0)
+ stq s1,MKS_OFFSET+MKS_S1(t0)
+ stq s2,MKS_OFFSET+MKS_S2(t0)
+ stq s3,MKS_OFFSET+MKS_S3(t0)
+ stq s4,MKS_OFFSET+MKS_S4(t0)
+ stq s5,MKS_OFFSET+MKS_S5(t0)
+ stq s6,MKS_OFFSET+MKS_S6(t0)
+#undef MKS_OFFSET
+1:
+ ldq t1,THREAD_KERNEL_STACK(a2) /* get new kernel stack */
+
+ /*
+ * Part 2: Setup new context
+ *
+ * Setup important global variables.
+ * Relevant globals are bracketed in comments
+ *
+ */
+
+ lda t0,active_threads
+ s8addq v0,t0,t0
+ stq a2,0(t0) /* >>active_threads<< */
+
+ lda t0,active_stacks
+ s8addq v0,t0,t0
+ stq t1,0(t0) /* >>active_stacks<< */
+
+ lda t0,KERNEL_STACK_SIZE-MSB_SIZE-MEL_SIZE-MKS_SIZE(t1)
+
+ ldq sp,MKS_SP(t0) /* get new sp value */
+
+ ldq t1,THREAD_TASK(a2)
+ ldq ra,MKS_PC(t0)
+ ldq t1,EML_DISPATCH(t1)
+ ldq s1,MKS_S1(t0)
+ ldq s2,MKS_S2(t0)
+ ldq s3,MKS_S3(t0)
+ ldq s4,MKS_S4(t0)
+ ldq s5,MKS_S5(t0)
+ ldq s6,MKS_S6(t0)
+ ldq s0,MKS_S0(t0)
+ lda t0,current_dispatch
+ s8addq v0,t0,t0
+ stq t1,0(t0) /* >>current_dispatch<< */
+ or a0,zero,v0
+ or ra,zero,pv /* enter/return issue */
+ ret zero,(ra)
+
+ /*
+ * We return the old thread in v0 for switch_context
+ * and a0 for thread_continue.
+ */
+
+ END(Switch_context)
+
+/*
+ * Object:
+ * stack_handoff EXPORTED function
+ *
+ * Arguments:
+ * a0 old thread *
+ * a1 new thread *
+ * a2 old task *
+ * a3 new task *
+ *
+ * Move the kernel stack from the old thread to the new thread.
+ * Installs the new thread in various globals.
+ * Changes address spaces if necessary.
+ * Assumes interrupts are disabled.
+ *
+ * Flames:
+ * Here we see why we want our own PAL code.
+ */
+
+LEAF(Stack_handoff,4)
+ ldgp gp,0(pv)
+reenter:
+#if (NCPUS>1)
+ call_pal op_mfpr_whami
+#else
+ or zero,zero,v0
+#endif
+ ldq t6,THREAD_PCB(a1) /* t6 is new pcb */
+ cmpeq a2,a3,pv
+ bne pv,stack_handoff_context
+
+ /*
+ * We have to switch address spaces.
+ */
+
+ ldq t2,TASK_MAP(a3) /* t2 is new map */
+ ldq t7,EML_DISPATCH(a3) /* t7 is new dispatch table */
+ ldq t3,MAP_PMAP(t2) /* t3 is new pmap */
+
+ lda t4,current_dispatch
+ s8addq v0,t4,t4
+ stq t7,0(t4)
+
+ ldl t4,PMAP_PID(t3) /* t4 is new tlbpid */
+#if (NCPUS > 1)
+ bge t4,stack_handoff_done
+#else
+ bge t4,stack_handoff_asn
+#endif
+
+ /*
+ * Bad news - we need a new tlbpid.
+ * We use assign_tlbpid and try again.
+ */
+
+ subq sp,40,sp
+ stq a0,0(sp)
+ stq a1,8(sp)
+ stq a2,16(sp)
+ stq a3,24(sp)
+ stq ra,32(sp)
+
+ or t3,zero,a0
+ CALL(pmap_assign_tlbpid)
+
+ ldq a0,0(sp)
+ ldq a1,8(sp)
+ ldq a2,16(sp)
+ ldq a3,24(sp)
+ ldq ra,32(sp)
+ addq sp,40,sp
+ br zero,reenter /* try again */
+
+stack_handoff_asn:
+ /*
+ * We have a tlbpid, swap process context now
+ */
+ stl t4,MSS_ASN(t6) /* ASN */
+
+stack_handoff_context: /* did not do it in cover code */
+ /* pcb ptr in t6, virtual */
+ stq sp,MSS_KSP(t6)
+ or a0,zero,t4
+ or v0,zero,t8
+ zap t6,0xf0,a0 /* make phys, quick and dirty */
+ call_pal op_swpctxt
+ or t4,zero,a0
+ or t8,zero,v0
+
+stack_handoff_done:
+
+ /*
+ * Change active_threads.
+ * Attach the exception link to the new pcb.
+ */
+
+ lda t4,active_threads
+ s8addq v0,t4,t4
+ stq a1,0(t4)
+
+ lda t4,active_stacks
+ s8addq v0,t4,t4
+ ldq t4,0(t4)
+ stq t6,KERNEL_STACK_SIZE-MSB_SIZE+MSB_PCB(t4)
+
+ /*
+ * Now if *we* had designed the PAL code...
+ * ..we'd saved those 64 bytes in the PCB!
+	 * ..we'd have saved those 64 bytes in the PCB!
+ lda t0,KERNEL_STACK_SIZE-1(zero) /* get exception frame */
+ or t0,sp,t0
+ lda t0,-TF_SIZE-MSB_SIZE+1(t0)
+
+ /* save the old ones */
+/* opt: seems TF_PC might be null ? for kernel threads, that is */
+ ldq t7,THREAD_PCB(a0)
+ lda t1,MSS_SAVEDF(t7) /* switch framep to pcb */
+ stq t1,MSS_FRAMEP(t7)
+ ldq t1,TF_R2(t0)
+ stq t1,MSS_T1(t7)
+ ldq t1,TF_R3(t0)
+ stq t1,MSS_T2(t7)
+ ldq t1,TF_R4(t0)
+ stq t1,MSS_T3(t7)
+ ldq t1,TF_R5(t0)
+ stq t1,MSS_T4(t7)
+ ldq t1,TF_R6(t0)
+ stq t1,MSS_T5(t7)
+ ldq t1,TF_R7(t0)
+ stq t1,MSS_T6(t7)
+ ldq t1,TF_PC(t0)
+ stq t1,MSS_PC(t7)
+
+ /* install new ones */
+ stq t0,MSS_FRAMEP(t6) /* switch framep to stack */
+ ldq t1,MSS_T1(t6)
+ stq t1,TF_R2(t0)
+ ldq t1,MSS_T2(t6)
+ stq t1,TF_R3(t0)
+ ldq t1,MSS_T3(t6)
+ stq t1,TF_R4(t0)
+ ldq t1,MSS_T4(t6)
+ stq t1,TF_R5(t0)
+ ldq t1,MSS_T5(t6)
+ stq t1,TF_R6(t0)
+ ldq t1,MSS_T6(t6)
+ stq t1,TF_R7(t0)
+ ldq t1,MSS_PC(t6)
+ stq t1,TF_PC(t0)
+
+
+ RET /* true return */
+	END(Stack_handoff)
+
+/*
+ * Object:
+ * setjmp EXPORTED function
+ * _setjmp EXPORTED function alias
+ *
+ * Save current context for non-local goto's
+ *
+ * Arguments:
+ * a0 jmp_buf *
+ *
+ * Saves all registers that are callee-saved in the
+ * given longjmp buffer. Same as user level _setjmp,
+ * but kernel does not use fpa.
+ * Return 0
+ */
+LEAF(setjmp,1)
+XLEAF(_setjmp,1)
+ stq ra,JB_PC(a0)
+ stq sp,JB_SP(a0)
+ stq s0,JB_S0(a0)
+ stq s1,JB_S1(a0)
+ stq s2,JB_S2(a0)
+ stq s3,JB_S3(a0)
+ stq s4,JB_S4(a0)
+ stq s5,JB_S5(a0)
+ stq s6,JB_S6(a0)
+ or a0,zero,t4 /* PAL clobbers */
+ call_pal op_mfpr_ipl
+ stq v0,JB_PS(t4)
+ or zero,zero,v0 /* return zero */
+ RET
+ END(setjmp)
+
+
+/*
+ * Object:
+ * longjmp EXPORTED function
+ * _longjmp EXPORTED function
+ *
+ * Perform a non-local goto
+ *
+ * Arguments:
+ * a0 jmp_buf *
+ * a1 unsigned
+ *
+ * Restores all registers that are callee-saved from the
+ * given longjmp buffer. Same as user level _longjmp
+ * Return the second argument.
+ */
+LEAF(longjmp,2)
+XLEAF(_longjmp,2)
+ ldq ra,JB_PC(a0)
+ ldq sp,JB_SP(a0)
+ ldq s0,JB_S0(a0)
+ ldq s1,JB_S1(a0)
+ ldq s2,JB_S2(a0)
+ ldq s3,JB_S3(a0)
+ ldq s4,JB_S4(a0)
+ ldq s5,JB_S5(a0)
+ ldq s6,JB_S6(a0)
+ ldq a0,JB_PS(a0)
+ or a1,zero,t4
+ call_pal op_mtpr_ipl
+ or t4,zero,v0 /* return a1 */
+ RET
+ END(longjmp)
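+
+/*
+ * [Editorial sketch, not part of the MK83a sources] Typical kernel
+ * use of the pair above, e.g. to back out of a faulting probe
+ * ("recover" is a hypothetical jmp_buf):
+ *
+ *	jmp_buf	recover;
+ *
+ *	if (setjmp(&recover) == 0) {
+ *		... code that may call longjmp(&recover, 1) ...
+ *	} else {
+ *		... control resumes here, setjmp "returns" 1 ...
+ *	}
+ *
+ * Note the current IPL is saved in JB_PS and reinstated by longjmp.
+ */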
diff --git a/alpha/alpha/context.h b/alpha/alpha/context.h
new file mode 100644
index 00000000..50540b0a
--- /dev/null
+++ b/alpha/alpha/context.h
@@ -0,0 +1,85 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: context.h,v $
+ * Revision 2.2 93/01/14 17:12:20 danner
+ * Added reference to documentation source(s).
+ * [92/12/16 15:12:17 af]
+ *
+ * Created.
+ * [92/05/31 af]
+ *
+ * Created.
+ *
+ */
+
+/*
+ * File: context.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 5/92
+ *
+ * Register save definitions for non-local goto's
+ *
+ * This code was derived exclusively from information available in
+ * "Alpha Architecture Reference Manual", Richard L. Sites ed.
+ * Digital Press, Burlington, MA 01803
+ * ISBN 1-55558-098-X, Order no. EY-L520E-DP
+ */
+
+#ifndef _ALPHA_CONTEXT_H_
+#define _ALPHA_CONTEXT_H_ 1
+
+#ifndef ASSEMBLER
+typedef struct {
+ long s0;
+ long s1;
+ long s2;
+ long s3;
+ long s4;
+ long s5;
+ long s6;
+ long sp;
+ long pc;
+ long ps;
+} jmp_buf;
+
+typedef struct hw_pcb {
+ vm_offset_t ksp;
+ vm_offset_t esp;
+ vm_offset_t ssp;
+ vm_offset_t usp;
+ vm_offset_t ptbr;
+ long asn;
+ long ast_status;
+ long fpa_enabled;
+ long cycle_counter;
+ long process_unique;
+ long pal_scratch[6];
+} *hw_pcb_t;
+
+#endif /* ASSEMBLER */
+#endif /* _ALPHA_CONTEXT_H_ */
diff --git a/alpha/alpha/cpu_number.h b/alpha/alpha/cpu_number.h
new file mode 100644
index 00000000..fd580f57
--- /dev/null
+++ b/alpha/alpha/cpu_number.h
@@ -0,0 +1,49 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: cpu_number.h,v $
+ * Revision 2.2 93/01/14 17:12:30 danner
+ * Created.
+ * [92/12/10 14:55:43 af]
+ *
+ */
+/*
+ * File: cpu_number.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/92
+ *
+ * Machine-dependent definitions for cpu identification.
+ *
+ * On the ALPHA, cpu_number() is the WHAMI IPR.
+ */
+
+#ifndef _ALPHA_CPU_NUMBER_H_
+#define _ALPHA_CPU_NUMBER_H_
+
+extern unsigned long cpu_number();
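+
+/*
+ * [Editorial note] MI code uses this C interface; assembly code
+ * reads WHAMI directly, as in the "call_pal op_mfpr_whami"
+ * sequences of context.S and locore.S.
+ */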
+
+#endif /* _ALPHA_CPU_NUMBER_H_ */
diff --git a/alpha/alpha/frame.h b/alpha/alpha/frame.h
new file mode 100644
index 00000000..ed70c21d
--- /dev/null
+++ b/alpha/alpha/frame.h
@@ -0,0 +1,66 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: frame.h,v $
+ * Revision 2.2 93/01/14 17:13:08 danner
+ * Added reference to documentation source(s).
+ * [92/12/16 15:15:15 af]
+ *
+ * Created.
+ * [92/06/03 af]
+ *
+ */
+
+/*
+ * File: frame.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/92
+ *
+ * Definition of the stack setup generated by a trap,
+ * and expected by the op_rei PAL call (non-privileged)
+ *
+ * This code was derived exclusively from information available in
+ * "Alpha Architecture Reference Manual", Richard L. Sites ed.
+ * Digital Press, Burlington, MA 01803
+ * ISBN 1-55558-098-X, Order no. EY-L520E-DP
+ */
+
+#ifndef _ALPHA_FRAME_H_
+#define _ALPHA_FRAME_H_ 1
+
+struct trap_frame {
+ unsigned long saved_r2;
+ unsigned long saved_r3;
+ unsigned long saved_r4;
+ unsigned long saved_r5;
+ unsigned long saved_r6;
+ unsigned long saved_r7;
+ unsigned long saved_pc;
+ unsigned long saved_ps;
+};
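+
+/*
+ * [Editorial note] This mirrors the frame the PAL code pushes at
+ * the trap SP (see TRAP_dispatcher in alpha/locore.S): the scratch
+ * registers t1..t6 (r2..r7), then the exception PC and PS; op_rei
+ * pops the same eight quadwords on its way back.
+ */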
+
+#endif /* _ALPHA_FRAME_H_ */
diff --git a/alpha/alpha/lock.h b/alpha/alpha/lock.h
new file mode 100644
index 00000000..24941ab2
--- /dev/null
+++ b/alpha/alpha/lock.h
@@ -0,0 +1,128 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+
+/*
+ * HISTORY
+ * $Log: lock.h,v $
+ * Revision 2.2 93/01/14 17:13:16 danner
+ * Created.
+ * [92/11/20 af]
+ *
+ */
+
+/*
+ * Machine-dependent simple locks for the alpha.
+ */
+#ifndef _ALPHA_LOCK_H_
+#define _ALPHA_LOCK_H_
+
+#ifdef __GNUC__
+
+#if 0
+/*
+ * The code here depends on the GNU C compiler.
+ */
+
+#define _simple_lock_xchg_(lock, new_val) \
+ ({ register int _old_val_; \
+ asm volatile("xchgl %0, %2" \
+ : "=r" (_old_val_) \
+ : "0" (new_val), "m" (*(lock)) \
+ ); \
+ _old_val_; \
+ })
+
+#define simple_lock_init(l) \
+ ((l)->lock_data = 0)
+
+#define simple_lock(l) \
+ ({ \
+ while(_simple_lock_xchg_(l, 1)) \
+ while (*(volatile int *)&(l)->lock_data) \
+ continue; \
+ 0; \
+ })
+
+#define simple_unlock(l) \
+ (_simple_lock_xchg_(l, 0))
+
+#define simple_lock_try(l) \
+ (!_simple_lock_xchg_(l, 1))
+
+/*
+ * General bit-lock routines.
+ */
+#define bit_lock(bit, l) \
+ ({ \
+ asm volatile(" jmp 1f \n\
+ 0: btl %0, %1 \n\
+ jb 0b \n\
+ 1: lock \n\
+ btsl %0, %1 \n\
+ jb 0b" \
+ : \
+ : "r" (bit), "m" (*(volatile int *)(l))); \
+ 0; \
+ })
+
+#define bit_unlock(bit, l) \
+ ({ \
+ asm volatile(" lock \n\
+ btrl %0, %1" \
+ : \
+ : "r" (bit), "m" (*(volatile int *)(l))); \
+ 0; \
+ })
+
+/*
+ * Set or clear individual bits in a long word.
+ * The locked access is needed only to lock access
+ * to the word, not to individual bits.
+ */
+#define i_bit_set(bit, l) \
+ ({ \
+ asm volatile(" lock \n\
+ btsl %0, %1" \
+ : \
+ : "r" (bit), "m" (*(l)) ); \
+ 0; \
+ })
+
+#define i_bit_clear(bit, l) \
+ ({ \
+ asm volatile(" lock \n\
+ btrl %0, %1" \
+ : \
+ : "r" (bit), "m" (*(l)) ); \
+ 0; \
+ })
+
+#endif
+#endif /* __GNUC__ */
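+
+/*
+ * [Editorial sketch, not part of the MK83a sources] The xchgl/btl
+ * sequences above are i386 leftovers, dead under "#if 0".  A native
+ * Alpha simple_lock would be built on load-locked/store-conditional
+ * instead, roughly:
+ *
+ *	1:	ldl_l	t0,0(a0)	# load lock word, set lock flag
+ *		bne	t0,1b		# already held, spin
+ *		or	zero,1,t0
+ *		stl_c	t0,0(a0)	# store only if flag survived
+ *		beq	t0,1b		# reservation lost, retry
+ *		mb			# acquire barrier
+ */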
+
+extern void simple_lock_pause();
+
+#endif /* _ALPHA_LOCK_H_ */
diff --git a/alpha/alpha/locore.S b/alpha/alpha/locore.S
new file mode 100644
index 00000000..8610058b
--- /dev/null
+++ b/alpha/alpha/locore.S
@@ -0,0 +1,755 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: locore.s,v $
+ * Revision 2.3 93/03/09 10:50:17 danner
+ * Removed debugging code that did not fly under GCC.
+ * 	Sigh, nothing like shooting yourself in the foot.
+ * [93/02/19 af]
+ *
+ * Revision 2.2 93/02/05 07:59:22 danner
+ * Removed ISP hacks. Console callbacks work now,
+ * so they are not truly needed anymore.
+ * [93/01/26 af]
+ *
+ * Change all mov inst. to or instructions to avoid a strange chip
+ * bug seen in starts.
+ * [93/01/12 jeffreyh]
+ * Added reference to documentation source(s).
+ * [92/12/16 15:15:22 af]
+ *
+ * Created.
+ * [92/06/03 af]
+ *
+ */
+
+/*
+ * File: locore.s
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/92
+ *
+ *	Low level trap handlers
+ *
+ * This code was derived exclusively from information available in
+ * "Alpha Architecture Reference Manual", Richard L. Sites ed.
+ * Digital Press, Burlington, MA 01803
+ * ISBN 1-55558-098-X, Order no. EY-L520E-DP
+ *
+ */
+#include <cpus.h>
+#include <mach_kdb.h>
+
+#include <mach/alpha/asm.h>
+#include <mach/alpha/alpha_instruction.h>
+#include <mach/alpha/vm_param.h>
+#include <alpha/alpha_cpu.h>
+#include <alpha/trap.h>
+
+#include "assym.s"
+
+#if __GNU_AS__
+#define LDGP(x) setgp x
+#else
+#define LDGP(x) ldgp gp,x
+#endif
+
+ .set noat
+ .set noreorder
+
+/*
+ * Object:
+ * gimmeabreak EXPORTED function
+ *
+ * Drop into the debugger
+ *
+ * Arguments:
+ * none
+ *
+ * :-))
+ */
+LEAF(gimmeabreak,0)
+ call_pal op_bpt
+ RET
+ END(gimmeabreak)
+
+/* when you need to patch away instructions.. */
+EXPORT(alpha_nop)
+ nop
+ nop
+
+/*
+ * Object:
+ * TRAP_dispatcher EXPORTED VECTOR
+ *
+ * Common trampoline for all traps
+ *
+ * Arguments: (std trap)
+ * R2: SCBv -- here
+ * R3: SCBp -- pointer to handler routine
+ *
+ * Here we do the common regsave and stack setup, then branch
+ * to the handler routine which was passed as argument.
+ * We can either branch here directly from PAL, or after a
+ * minimum register munging.
+ */
+
+ .align 5
+
+VECTOR(TRAP_dispatcher,IM_T1|IM_T2|IM_T3|IM_T4|IM_T5|IM_T6|IM_RA)
+ /*
+ * The PAL code has pushed on the stack:
+ * sp--> t1
+ * +8 t2
+ * +10 t3
+ * +18 t4
+ * +20 t5
+ * +28 t6
+ * +30 PC
+ * +38 PS
+ *
+ * When we do the pal-rei this is popped off.
+ * t5-t6 are always free, t1-t2 contain the
+ * values at the SCB entry that shipped us here,
+ * t3-t4 are trap-specific.
+ * [t5 might be important if we came from TRAP_generic]
+ *
+ * So we have three scratch registers to play with.
+ *
+ * NOTE: we might take an interrupt at any time here,
+ * re-entering this very same code ==> careful
+ * with handling SP.
+ */
+ ldq t6,TF_PS(sp) /* did we come from user mode ? */
+ srl t6,4,t1 /* NB: exec==kernel, superv==user */
+ blbs t1,from_user
+ /*
+ * From kernel mode. Push full exception frame on stack.
+ */
+ /* allocate the saved_state, and adjust sp */
+ lda sp,-MEL_TF-MSS_SIZE(sp)
+
+ stq gp,MSS_GP(sp) /* fill a WB line */
+ stq a0,MSS_A0(sp)
+ stq a1,MSS_A1(sp)
+ stq a2,MSS_A2(sp)
+#if MACH_KDB
+ /* The trapped SP has been aligned, so the old
+ * SP value must be recovered from the PS bits.
+ * This is done later in kdb_breakpoint, out of
+ * the main path. Normally we do not care. */
+#endif
+ stq sp,MSS_SIZE+MEL_EFRAME(sp) /* exc link */
+ lda a0,MSS_SIZE+MEL_TF(sp) /* recover hw framep */
+ stq a0,MSS_FRAMEP(sp)
+ or sp,zero,a0
+
+ /*
+ * Here we have:
+ * t1 -> ?????
+ * t6 -> PS
+ * sp == a0 -> saved status
+ */
+ br zero,exc_save_registers
+
+from_user:
+#if DEBUG
+ lda t1,0x1fff(zero)
+ and t1,sp,t1
+ lda t1,-0x1f80(t1)
+ beq t1,nobuggo
+ call_pal op_bpt
+nobuggo:
+#endif
+ /*
+ * From user mode. Stack has been switched and
+ * HW frame pushed.
+ * On the stack we now have the HW frame, and
+ * on top of it the stack_base structure.
+ * In the stack_base structure there is the PCB pointer.
+ * The PCB has a backpointer setup already to the HW frame.
+ * t1 is free, it might point here or not.
+ */
+ ldq t1,TF_SIZE+MSB_PCB(sp) /* PCB pointer */
+ lda sp,-MEL_TF-MKS_SIZE(sp) /* adjust sp, alloc MKS */
+ stq gp,MSS_GP(t1)
+ stq a0,MSS_A0(t1)
+ stq a1,MSS_A1(t1)
+ stq a2,MSS_A2(t1)
+ or t1,zero,a0
+ ldq t1,TF_PC+MEL_TF+MKS_SIZE(sp)
+ stq t1,MSS_PC(a0)
+#if 1
+ /* aurghhh... iff we took a page fault */
+ ldq t1,TF_R2+MEL_TF+MKS_SIZE(sp)
+ stq t1,MSS_T1(a0)
+ ldq t1,TF_R3+MEL_TF+MKS_SIZE(sp)
+ stq t1,MSS_T2(a0)
+ ldq t1,TF_R4+MEL_TF+MKS_SIZE(sp)
+ stq t1,MSS_T3(a0)
+ ldq t1,TF_R5+MEL_TF+MKS_SIZE(sp)
+ stq t1,MSS_T4(a0)
+ ldq t1,TF_R6+MEL_TF+MKS_SIZE(sp)
+ stq t1,MSS_T5(a0)
+ ldq t1,TF_R7+MEL_TF+MKS_SIZE(sp)
+ stq t1,MSS_T6(a0)
+#endif
+ /* the loading of a proper GP is left for later */
+
+exc_save_registers:
+ /*
+ * Here:
+ * sp -> <somewhere>
+ * a0 -> saved status (pcb or stack)
+ * t6 -> PS
+ * gp -> invalid (but saved)
+ * Saved already: t1-t6, pc, ps, gp, a0-a2, user sp
+ */
+ stq a3,MSS_A3(a0)
+ stq a4,MSS_A4(a0)
+ stq a5,MSS_A5(a0)
+ stq ra,MSS_RA(a0)
+
+ or t3,zero,a1
+ or t4,zero,a2
+ or t5,zero,a3
+
+ jsr ra,(t2)
+
+TRAP_end:
+ /*
+	 * Q: are we going back to user mode ?
+ *
+ * A: see restore_all_other_regs. We get a
+ * correct SP (in t1) and A0 values setup for us.
+ * A1 is zero if going off to userland.
+ */
+ bne a1,out_we_go
+
+ IMPORT(need_ast,4*NCPUS)
+
+#if (NCPUS>1)
+ or v0,zero,a2
+ call_pal op_mfpr_whami
+ lda a1,need_ast
+ s4addq v0,a1,a1
+ or a2,zero,v0
+#else
+ lda a1,need_ast
+#endif
+ ldl a1,0(a1)
+ bne a1,TRAP_ast
+out_we_go:
+
+ /* Memo: t1-t6 are scratch here.
+ t1 is used to hold the value of sp at exit */
+
+ /* XXX add prefetching, both ways */
+ ldq gp,MSS_GP(a0)
+/* ldq a0,MSS_A0(a0) fetched now, loaded later */
+ ldq a1,MSS_A1(a0)
+ ldq a2,MSS_A2(a0)
+
+ ldq a3,MSS_A3(a0)
+ ldq a4,MSS_A4(a0)
+ ldq a5,MSS_A5(a0)
+ ldq ra,MSS_RA(a0)
+
+ ldq a0,MSS_A0(a0)
+
+ /* Only here can we let the SP go */
+ or t1,zero,sp
+
+ call_pal op_rei
+
+
+ END(TRAP_dispatcher)
+
+/*
+ * Object:
+ * save_all_other_regs LOCAL function
+ *
+ * Argument:
+ * a0 alpha_saved_state *
+ *
+ * Save all registers that TRAP_dispatcher did not.
+ *	Same state as if dispatcher branched here straight.
+ * [Still unoptimized]
+ * Companion routine restore_all_other_regs SHOULD NOT BE MOVED
+ *
+ */
+STATIC_LEAF(save_all_other_regs,1)
+ stq v0,MSS_V0(a0)
+ stq t0,MSS_T0(a0)
+ stq t7,MSS_T7(a0)
+
+ stq t8,MSS_T8(a0)
+ stq t9,MSS_T9(a0)
+ stq t10,MSS_T10(a0)
+ stq t11,MSS_T11(a0)
+
+ stq t12,MSS_T12(a0)
+ stq s0,MSS_S0(a0)
+ stq s1,MSS_S1(a0)
+ stq s2,MSS_S2(a0)
+
+ stq s3,MSS_S3(a0)
+ stq s4,MSS_S4(a0)
+ stq s5,MSS_S5(a0)
+ stq s6,MSS_S6(a0)
+
+ stq at,MSS_AT(a0)
+
+#if MACH_KDB
+ /*
+ * For debugger's sake, we declare here a VECTOR routine
+ * so that we can tell from ra=restore_all_other_regs
+ * that this is an exception frame.
+ */
+ END(save_all_other_regs)
+VECTOR(locore_exception_return, 0)
+
+#endif /* MACH_KDB */
+
+ /*
+ * NOTE: This is just a (strange) call into C.
+ */
+ or ra,zero,pv
+ jsr ra,(pv)
+ /* so that ra==restore_all_other_regs */
+ /* DO NOT MOVE THE FOLLOWING THEN */
+
+/*
+ * Object:
+ * restore_all_other_regs LOCAL function
+ *
+ * Argument:
+ * sp+MEL_FRAME alpha_saved_state **
+ *
+ * Restore all registers that TRAP_end will not.
+ * [Still unoptimized]
+ *
+ */
+#if MACH_KDB
+restore_all_other_regs:
+#else
+STATIC_XLEAF(restore_all_other_regs,1)
+#endif
+ /*
+ * The following test assumes kernel stacks are
+ * aligned to their size. MUST be true, we shall
+ * need (mips docet) only 8k, e.g. twice what
+ * a 32 bit RISC needs. Less than that, actually.
+ */
+ lda a1,KERNEL_STACK_SIZE-1 (zero) /* < 32k ! */
+ and a1,sp,a1
+ lda a1,-(KERNEL_STACK_SIZE-MKS_SIZE-MEL_SIZE-MSB_SIZE) (a1)
+ bne a1,to_kernel_mode
+to_user_mode:
+ ldq a0,MKS_SIZE+MEL_SIZE+MSB_PCB(sp) /* PCB pointer */
+ lda t1,MEL_TF+MKS_SIZE(sp) /* hw frame */
+restore_them_registers:
+ ldq v0,MSS_V0(a0)
+ ldq t0,MSS_T0(a0)
+ ldq t7,MSS_T7(a0)
+
+ ldq t8,MSS_T8(a0)
+ ldq t9,MSS_T9(a0)
+ ldq t10,MSS_T10(a0)
+ ldq t11,MSS_T11(a0)
+
+ ldq t12,MSS_T12(a0)
+ /* optimize away these, later */
+ ldq s0,MSS_S0(a0)
+ ldq s1,MSS_S1(a0)
+ ldq s2,MSS_S2(a0)
+
+ ldq s3,MSS_S3(a0)
+ ldq s4,MSS_S4(a0)
+ ldq s5,MSS_S5(a0)
+ ldq s6,MSS_S6(a0)
+
+ ldq at,MSS_AT(a0)
+
+ br zero,TRAP_end
+
+to_kernel_mode:
+ or sp,zero,a0 /* saved_state pointer */
+ lda t1,MSS_SIZE+MEL_TF(sp)
+ br zero,restore_them_registers
+ END(save_all_other_regs)
+
+
+/*
+ * Object:
+ * TRAP_ast EXPORTED VECTOR
+ *
+ */
+VECTOR(TRAP_ast,0)
+ /* See TRAP_end (assume jumped-to by restore_all_other_regs)
+	   for the state here.  It's OK to just save_all_other_regs */
+ IMPORT(ast_taken,4)
+ lda ra,ast_taken
+ br zero,save_all_other_regs
+ /* NOTREACHED */
+ END(TRAP_ast)
+
+/*
+ * Object:
+ * TRAP_generic EXPORTED VECTOR
+ *
+ *	Most traps get here from the SCB; we play with regs
+ *	a bit, then go to the dispatcher code.
+ *
+ */
+VECTOR(TRAP_generic,IM_T1|IM_T2|IM_T3|IM_T4|IM_T5|IM_T6|IM_RA)
+ /*
+ * t1 contains this address
+ * t2 contains the trap argument
+ */
+ or t2,zero,t5
+ br t2,TRAP_dispatcher /* gets back here */
+ LDGP(0(t2))
+ /*
+ * Now we got to save registers, but we are
+ * all setup for calling C already.
+ * save_all_other_regs is also ready to avoid
+ * branching back here.
+ */
+ lda ra,trap
+ br zero,save_all_other_regs
+
+ /* NOTREACHED */
+
+ END(TRAP_generic)
+
+/*
+ * Object:
+ * TRAP_interrupt EXPORTED VECTOR
+ *
+ *	All interrupts get here from the SCB; we play with regs
+ *	a bit, then go to the dispatcher code.
+ *
+ */
+VECTOR(TRAP_interrupt,IM_T1|IM_T2|IM_T3|IM_T4|IM_T5|IM_T6|IM_RA)
+ /*
+ * t1 contains this address
+ * t2 contains the interrupt handler
+ */
+ or t2,zero,t5
+ br t2,TRAP_dispatcher /* gets back here */
+ LDGP(0(t2))
+ /*
+ * Now we got to save registers, but we are
+ * all setup for calling C already.
+ * save_all_other_regs is also ready to avoid
+ * branching back here.
+ */
+
+ /*
+ * We vector interrupts in two ways:
+ * (1) the argument t2 above is a function pointer
+ * (2) the argument t2 above is a small integer
+ * In the first case we call the function directly,
+ * recognizing the case by the high bit set (a k0seg
+ * address). In the second case we dispatch to a
+ * C function for further processing. Note that
+ * argument registers have been saved already
+ */
+ or t5,zero,ra
+ blt t5,save_all_other_regs
+
+ or t5,zero,a1 /* needs further vectoring */
+ IMPORT(interrupt,8)
+ lda ra,interrupt
+ br zero,save_all_other_regs
+
+ /* NOTREACHED */
+
+ END(TRAP_interrupt)
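+
+/*
+ * [Editorial note] The "blt t5,save_all_other_regs" above works
+ * because k0seg addresses have the sign bit set: a handler pointer
+ * is negative as a signed 64-bit value and gets called directly
+ * (ra was just set to it), while a small positive integer falls
+ * through to the C interrupt() dispatcher with the integer in a1.
+ */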
+
+/*
+ * Object:
+ * thread_syscall_return EXPORTED function
+ *
+ * Return from syscall
+ *
+ * Arguments:
+ * a0 int
+ *
+ * Pop kernel stack, to get out to userland. Returns to
+ * the user the provided argument as result of the syscall.
+ */
+LEAF(thread_syscall_return,1)
+
+ or a0,zero,v0 /* argument is return code */
+ lda t0,KERNEL_STACK_SIZE-1(zero) /* get exception frame */
+ or t0,sp,t0
+ /* pop the stack [later: again] */
+ lda sp,-MKS_SIZE-MEL_SIZE-MSB_SIZE+1(t0)
+ lda t1,-TF_SIZE-MSB_SIZE+1(t0)
+ /* get pcb pointer */
+ ldq a0,TF_SIZE+MSB_PCB(t1)
+ ldq s0,MSS_S0(a0)
+ ldq s1,MSS_S1(a0)
+ ldq s2,MSS_S2(a0)
+ ldq s3,MSS_S3(a0)
+ ldq s4,MSS_S4(a0)
+ ldq s5,MSS_S5(a0)
+ ldq s6,MSS_S6(a0)
+ or zero,zero,a1 /* to user for sure */
+ br zero,TRAP_end
+ END(thread_syscall_return)
+
+/*
+ * Object:
+ * thread_bootstrap_return EXPORTED function
+ *
+ * Startup a USER-MODE thread.
+ * ALSO, return from user-mode page faults.
+ *
+ * Arguments:
+ * a0 int
+ *
+ * Pop kernel stack, to get out to userland. Returns to
+ * the user with the initial register status loaded up.
+ */
+LEAF(thread_bootstrap_return,0)
+
+ lda t0,KERNEL_STACK_SIZE-1(zero) /* get 1st exception frame */
+ or t0,sp,t0
+ /* pop the stack */
+ lda sp,-MKS_SIZE-MEL_SIZE-MSB_SIZE+1(t0)
+ /* get pcb pointer */
+ ldq a0,MKS_SIZE+MEL_SIZE+MSB_PCB(sp)
+ /* set backptr to hw frame */
+ lda t0,MKS_SIZE+MEL_TF(sp)
+ stq t0,MSS_FRAMEP(a0)
+/*sanity*/
+stq zero,MKS_SIZE+MEL_EFRAME(sp)
+
+ /* Loadup HW frame now */
+
+ lda a1,alpha_initial_ps_value(zero)
+ stq a1,MKS_SIZE+MEL_TF+TF_PS(sp) /* user-mode ps */
+
+ ldq t0,MSS_PC(a0)
+ stq t0,MKS_SIZE+MEL_TF+TF_PC(sp)
+
+ ldq t0,MSS_T1(a0)
+ stq t0,MKS_SIZE+MEL_TF+TF_R2(sp)
+ ldq t0,MSS_T2(a0)
+ stq t0,MKS_SIZE+MEL_TF+TF_R3(sp)
+ ldq t0,MSS_T3(a0)
+ stq t0,MKS_SIZE+MEL_TF+TF_R4(sp)
+ ldq t0,MSS_T4(a0)
+ stq t0,MKS_SIZE+MEL_TF+TF_R5(sp)
+ ldq t0,MSS_T5(a0)
+ stq t0,MKS_SIZE+MEL_TF+TF_R6(sp)
+ ldq t0,MSS_T6(a0)
+ stq t0,MKS_SIZE+MEL_TF+TF_R7(sp)
+
+ /* Now the S regs */
+ ldq s0,MSS_S0(a0)
+ ldq s1,MSS_S1(a0)
+ ldq s2,MSS_S2(a0)
+ ldq s3,MSS_S3(a0)
+ ldq s4,MSS_S4(a0)
+ ldq s5,MSS_S5(a0)
+ ldq s6,MSS_S6(a0)
+
+ /* done */
+ br zero,restore_all_other_regs
+ END(thread_bootstrap_return)
+
+
+/*
+ * Object:
+ * call_continuation EXPORTED function
+ *
+ * Arguments:
+ * a0 int
+ *
+ * Invoke the given function with a cleaned up kernel stack.
+ * Stack is set as if just in from user mode.
+ */
+LEAF(call_continuation,1)
+ lda t0,KERNEL_STACK_SIZE-1(zero) /* pop the stack */
+ or t0,sp,t0
+ lda sp,-MKS_SIZE-MEL_SIZE-MSB_SIZE+1(t0)
+ or a0,zero,ra
+ or a0,zero,pv
+ jmp zero,(ra) /* call continuation */
+ END(call_continuation)
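+
+/*
+ * [Editorial sketch] This is the resumption half of blocking with
+ * a continuation: a thread that blocked with something like
+ *
+ *	thread_block(thread_syscall_return);
+ *
+ * is later resumed by the scheduler via call_continuation() on its
+ * fresh kernel stack, instead of unwinding saved register state.
+ */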
+
+
+/*
+ * Object:
+ * kdb_breakpoint EXPORTED function
+ *
+ * Kernel breakpoint trap
+ *
+ * Arguments:
+ * a0 saved_status *
+ *
+ * Save as much state as possible on stack including IPRs
+ * status, invoke debugger. On return, restore and continue.
+ */
+VECTOR(kdb_breakpoint,0)
+ /* a0-a5 saved already, sp, ps, gp, ra too */
+ LDGP(0(t2)) /* we know how dispatch works */
+ stq v0,MSS_V0(a0)
+ stq t0,MSS_T0(a0)
+
+ /* t6 holds the PS */
+ srl t6,4,t1 /* NB: exec==kernel, superv==user */
+ blbs t1,kdb_of_user
+
+ /* find SP value before trapping */
+	srl	t6,56,t0	/* t6 still holds PS. Alignment.. */
+ lda v0,MSS_SIZE+MEL_TF(sp) /* .. HW frame ... */
+ addq t0,v0,t0 /* .. adjust for alignment ... */
+ lda t0,MEL_SIZE-MEL_TF(t0) /* .. got it */
+ stq t0,MSS_SP(a0)
+ br zero,copy_frame
+
+kdb_of_user:
+ ldq t0,MSS_USP(a0) /* user SP */
+ lda v0,MKS_SIZE+MEL_TF(sp)
+ stq t0,MSS_SP(a0)
+
+copy_frame:
+ stq sp,MSS_BAD(a0) /* cut this crap, later */
+ /*
+ * Copy the exception frame where ddb can look at.
+ * This is not normally done on traps.
+ */
+ stq t6,MSS_PS(a0) /* exc ps */
+
+ ldq t0,TF_PC(v0) /* exc pc */
+ subq t0,4,t0 /* is next PC */
+ stq t0,MSS_PC(a0)
+
+ ldq t0,TF_R2(v0)
+ stq t0,MSS_T1(a0)
+ ldq t0,TF_R3(v0)
+ stq t0,MSS_T2(a0)
+ ldq t0,TF_R4(v0)
+ stq t0,MSS_T3(a0)
+ ldq t0,TF_R5(v0)
+ stq t0,MSS_T4(a0)
+ ldq t0,TF_R6(v0)
+ stq t0,MSS_T5(a0)
+ ldq t0,TF_R7(v0)
+ stq t0,MSS_T6(a0)
+
+ /* duplicated so that save/restore are self-debuggable */
+ stq t7,MSS_T7(a0)
+ stq t8,MSS_T8(a0)
+ stq t9,MSS_T9(a0)
+ stq t10,MSS_T10(a0)
+ stq t11,MSS_T11(a0)
+ stq t12,MSS_T12(a0)
+
+ stq s0,MSS_S0(a0)
+ stq s1,MSS_S1(a0)
+ stq s2,MSS_S2(a0)
+ stq s3,MSS_S3(a0)
+ stq s4,MSS_S4(a0)
+ stq s5,MSS_S5(a0)
+ stq s6,MSS_S6(a0)
+
+ stq at,MSS_AT(a0)
+/* stq zero,MSS_BAD(a0) used for trapped SP */
+
+ addq zero,T_BP,t0
+ stq t0,MSS_CAUSE(a0)
+
+ mov zero,a1
+ CALL(kdb_trap)
+
+ /*
+ * The following test assumes kernel stacks are
+ * aligned to their size. MUST be true, we shall
+ * need (mips docet) only 8k, e.g. twice what
+ * a 32 bit RISC needs. Less than that, actually.
+ */
+ lda a1,KERNEL_STACK_SIZE-1 (zero) /* < 32k ! */
+ and a1,sp,a1
+ lda a1,-(KERNEL_STACK_SIZE-MKS_SIZE-MEL_SIZE-MSB_SIZE) (a1)
+ beq a1,to_user_mode_1
+ or sp,zero,a0 /* saved_state pointer */
+ lda t1,MSS_SIZE+MEL_TF(sp)
+restore_them_1:
+#if 0
+ will play with matches later
+ /* restore SP, possibly modified with KDB */
+ ldq sp,MSS_BAD(a0)
+ stq t0,MSS_SP(a0)
+#endif
+ ldq t0,MSS_T1(a0)
+ stq t0,TF_R2(t1)
+ ldq t0,MSS_T2(a0)
+ stq t0,TF_R3(t1)
+ ldq t0,MSS_T3(a0)
+ stq t0,TF_R4(t1)
+ ldq t0,MSS_T4(a0)
+ stq t0,TF_R5(t1)
+ ldq t0,MSS_T5(a0)
+ stq t0,TF_R6(t1)
+ ldq t0,MSS_T6(a0)
+ stq t0,TF_R7(t1)
+ ldq t0,MSS_PC(a0)
+ stq t0,TF_PC(t1)
+
+ ldq v0,MSS_V0(a0)
+ ldq t0,MSS_T0(a0)
+
+ ldq t7,MSS_T7(a0)
+ ldq t8,MSS_T8(a0)
+ ldq t9,MSS_T9(a0)
+ ldq t10,MSS_T10(a0)
+ ldq t11,MSS_T11(a0)
+ ldq t12,MSS_T12(a0)
+
+ ldq s0,MSS_S0(a0)
+ ldq s1,MSS_S1(a0)
+ ldq s2,MSS_S2(a0)
+ ldq s3,MSS_S3(a0)
+ ldq s4,MSS_S4(a0)
+ ldq s5,MSS_S5(a0)
+ ldq s6,MSS_S6(a0)
+
+ ldq at,MSS_AT(a0)
+
+ br zero,TRAP_end
+
+to_user_mode_1:
+ ldq a0,MKS_SIZE+MEL_SIZE+MSB_PCB(sp) /* PCB pointer */
+ lda t1,MKS_SIZE+MEL_TF(sp) /* hw frame */
+ br zero,restore_them_1
+
+ END(kdb_breakpoint)
diff --git a/alpha/alpha/mach_param.h b/alpha/alpha/mach_param.h
new file mode 100644
index 00000000..c80e986e
--- /dev/null
+++ b/alpha/alpha/mach_param.h
@@ -0,0 +1,57 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: mach_param.h,v $
+ * Revision 2.2 93/01/14 17:13:23 danner
+ * Added reference to documentation source(s).
+ * [92/12/16 15:15:30 af]
+ *
+ * Created.
+ * [92/05/31 af]
+ *
+ */
+/*
+ * File: mach_param.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 5/92
+ *
+ * Machine-dependent parameters for Alpha.
+ */
+#ifndef _ALPHA_MACH_PARAM_H_
+#define _ALPHA_MACH_PARAM_H_ 1
+
+/*
+ * The clock frequency, unfortunately, was not defined once
+ * and for all in the architecture.  This definition is here
+ * to tell MI code we might have a high frequency clock.
+ * Autoconf code takes care of fixing up the relevant vars
+ * in kern/mach_clock.c.
+ */
+
+#define HZ (1000) /* clock tick each 1 ms. */
+
+#endif /* _ALPHA_MACH_PARAM_H_ */
diff --git a/alpha/alpha/machspl.h b/alpha/alpha/machspl.h
new file mode 100644
index 00000000..2f0bcf6d
--- /dev/null
+++ b/alpha/alpha/machspl.h
@@ -0,0 +1,68 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: machspl.h,v $
+ * Revision 2.2 93/05/15 20:59:16 mrt
+ * Used to be machparam.h
+ * [93/05/15 mrt]
+ *
+ *
+ */
+/*
+ * File: machspl.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 10/89
+ *
+ * Machine-dependent SPL definitions.
+ */
+
+#ifndef _MACHINE_MACHSPL_H_
+#define _MACHINE_MACHSPL_H_
+
+#include <mach/machine/vm_types.h>
+#include <alpha/alpha_cpu.h>
+
+/*
+ * We trust THE BOOK that this is truly box-independent.
+ */
+
+typedef natural_t spl_t;
+
+extern spl_t alpha_swap_ipl( spl_t );
+
+#define splx(s) (void) alpha_swap_ipl(s)
+#define spl0() alpha_swap_ipl(ALPHA_IPL_0)
+#define splsoftclock() alpha_swap_ipl(ALPHA_IPL_SOFTC)
+#define splimp() alpha_swap_ipl(ALPHA_IPL_IO)
+#define splbio() alpha_swap_ipl(ALPHA_IPL_IO)
+#define spltty() alpha_swap_ipl(ALPHA_IPL_IO)
+#define splclock() alpha_swap_ipl(ALPHA_IPL_CLOCK)
+#define splhigh() alpha_swap_ipl(ALPHA_IPL_HIGH)
+#define splvm() splhigh()
+#define splsched() splhigh()
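+
+/*
+ * [Editorial example] Typical bracketed use, as in alpha/pcb.c:
+ *
+ *	spl_t s;
+ *
+ *	s = splsched();
+ *	... critical section ...
+ *	(void) splx(s);
+ */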
+
+#endif /* _MACHINE_MACHSPL_H_ */
diff --git a/alpha/alpha/parse_args.c b/alpha/alpha/parse_args.c
new file mode 100644
index 00000000..3f888eee
--- /dev/null
+++ b/alpha/alpha/parse_args.c
@@ -0,0 +1,475 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: parse_args.c,v $
+ * Revision 2.3 93/03/09 10:50:27 danner
+ * String protos.
+ * [93/03/07 af]
+ *
+ * Revision 2.2 93/02/05 07:59:28 danner
+ * 	Since there is a (twisted) way to pass arguments to the
+ * 	kernel, resurrected and adapted my old mips code.
+ * [93/02/02 af]
+ * Created empty.
+ * [92/12/10 af]
+ *
+ */
+/*
+ * File: parse_args.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 10/89
+ *
+ * Command line parsing for the Mach kernel on Alpha.
+ *
+ */
+
+/*
+ * Switches are names of kernel variables, which can be
+ * either simply set to 1 (flags) or set to some specific
+ * value provided in the command line.
+ *
+ * Example:
+ * setenv BOOT_OSFLAGS "boothowto=0xc memlim=0xc00000"
+ *
+ * Certain variables might also require some special processing
+ * before and/or after being set. For this reason we invoke
+ * a function that is associated with the variable, passing the
+ * desired new value as argument. Responsibility for setting
+ * the variable to the desired value remains with the function.
+ *
+ */
+
+#include <mach/std_types.h>
+#include <kern/strings.h>
+#include <alpha/prom_interface.h>
+
+
+typedef struct {
+ char *name; /* variable's name */
+ vm_offset_t address; /* variable's address */
+ void (*setf)(vm_offset_t var, char *val);
+ /* how to modify the variable */
+} switch_table;
+
+void int_setf (vm_offset_t var, char *val); /* default setf method */
+#define bool_setf int_setf
+void nat_setf (vm_offset_t var, char *val); /* natural wordsize */
+void string_setf (vm_offset_t var, char *val); /* for strings */
+void uchar_setf (vm_offset_t var, char *val); /* for unsigned chars */
+
+static char* isa_switch(
+ char *str,
+ switch_table **swptr);
+
+static void scan_cmdline(
+ char *cmd,
+ int *p_argc,
+ char **argv);
+
+integer_t string_to_int( char *s);
+
+static unsigned int char_to_uint( char c );
+
+/*
+ * Object:
+ * kernel_switch EXPORTED structure
+ *
+ * Kernel arguments table.
+ * Add/remove variables from here and create the setf methods
+ * in the module that exports that variable, if necessary
+ * (an illustrative entry follows the table below).
+ * The most common methods are defined herein.
+ *
+ * This table is exported even if it need not be, so that it
+ * can be patched with ADB if necessary.
+ *
+ */
+extern boolean_t
+ askme;
+
+extern vm_size_t
+ zdata_size, memlimit, page_size;
+
+extern int
+ rcline, boothowto, pmap_debug;
+
+extern unsigned char
+ scsi_initiator_id[];
+
+unsigned char
+ *monitor_types[4];
+
+/* The table proper */
+
+switch_table kernel_switch[] = {
+ { "askme", (vm_offset_t)&askme,
+ bool_setf },
+ { "boothowto", (vm_offset_t)&boothowto,
+ int_setf },
+ { "memlimit", (vm_offset_t)&memlimit,
+ nat_setf },
+ { "monitor0", (vm_offset_t)&monitor_types[0],
+ string_setf },
+ { "monitor1", (vm_offset_t)&monitor_types[1],
+ string_setf },
+ { "monitor2", (vm_offset_t)&monitor_types[2],
+ string_setf },
+ { "monitor3", (vm_offset_t)&monitor_types[3],
+ string_setf },
+ { "page_size", (vm_offset_t)&page_size,
+ nat_setf },
+ { "pmap_debug", (vm_offset_t)&pmap_debug,
+ int_setf },
+ { "rcline", (vm_offset_t)&rcline,
+ int_setf },
+ { "scsi0", (vm_offset_t)(&scsi_initiator_id[0]),
+ uchar_setf },
+ { "scsi1", (vm_offset_t)(&scsi_initiator_id[1]),
+ uchar_setf },
+ { "zdata_size", (vm_offset_t)&zdata_size,
+ nat_setf },
+ { 0, 0, 0 }
+};
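+
+/*
+ * [Editorial example, hypothetical entry] A new boolean switch,
+ * say "noprobe" exported by some driver, would be wired into the
+ * table above as:
+ *
+ *	extern boolean_t noprobe;
+ *	...
+ *	{ "noprobe",	(vm_offset_t)&noprobe,
+ *					bool_setf },
+ *
+ * and then set from the console with
+ *	setenv BOOT_OSFLAGS "noprobe"
+ */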
+
+
+/*
+ * Object:
+ * parse_args EXPORTED function
+ *
+ * Kernel argument parsing function
+ *
+ * Invoked at kernel startup, _before_ configuration
+ */
+static char init_args[12] = "-xx";
+
+void
+parse_args()
+{
+ register int i;
+ switch_table *variable;
+ char *vptr;
+ int argc;
+#define MAX_ARGS 24
+ char *argv[MAX_ARGS+1];
+ static char flags[128];
+
+ flags[0] = 0;
+ prom_getenv( PROM_E_BOOTED_OSFLAGS, flags, sizeof(flags) );
+
+ scan_cmdline( flags, &argc, argv );
+
+ for (i = 0/*1*/; i < argc; i++) {
+
+ if (argv[i][0] == '-') {
+ dprintf("Init args: %s\n", argv[i]);
+ strncpy(init_args, argv[i], sizeof init_args - 1);
+ continue;
+ }
+ if (vptr = isa_switch(argv[i], &variable)) {
+ dprintf("Boot option: %s", variable->name);
+ (variable->setf) (variable->address, vptr);
+ dprintf("\n");
+ }
+ }
+}
+
+
+/*
+ * Object:
+ * isa_switch LOCAL function
+ *
+ * Attempt a match between a token and a switch.
+ *
+ * Returns 0 if no match, a pointer to the ascii
+ * representation of the desired value for the switch
+ * in case of a perfect match.
+ * "foo" will match with "foo" "foo=" "foo=baz"
+ */
+static char*
+isa_switch(
+ char *str,
+ switch_table **swptr)
+{
+ register switch_table *sw = kernel_switch;
+ register char *cp0, *cp1, c;
+
+ if (!str) /* sanity */
+ return 0L;
+ while (cp0 = sw->name) {
+ cp1 = str;
+ /* This is faster if the table is alphabetically ordered */
+ while ((c = *cp0++) == *cp1++)
+ if (c == 0)
+ break; /* a true prefix */
+ if (c == 0) {
+ /* prefix match, but we want a full match */
+ *swptr = sw;
+ if ((c = *--cp1) == 0)
+ return cp1;
+ if (c == '=')
+ return cp1 + 1;
+ }
+ sw++;
+ }
+
+ return (char*)0;
+}
+
+
+/*
+ * Object:
+ * int_setf EXPORTED function
+ *
+ * This is the "default" setf method.
+ * Understands integers, which default to "1"
+ * if no value is provided.
+ */
+void
+int_setf(
+ vm_offset_t var,
+ char *val)
+{
+ unsigned int binval;
+
+ if (*val == 0)
+ binval = 1;
+ else
+ binval = string_to_int(val);
+
+ dprintf(" = x%x", binval);
+ * (unsigned int *) var = binval;
+}
+
+
+/*
+ * Object:
+ * nat_setf EXPORTED function
+ *
+ * Understands naturally-sized, unsigned integers.
+ * No defaults.
+ */
+void
+nat_setf(
+ vm_offset_t var,
+ char *val)
+{
+ natural_t binval;
+
+ binval = string_to_int(val);
+
+ dprintf(" = x%lx", binval);
+ * (natural_t *) var = binval;
+}
+
+
+/*
+ * Object:
+ * uchar_setf EXPORTED function
+ *
+ * Understands single-byte integers, no default.
+ */
+void
+uchar_setf(
+ vm_offset_t var,
+ char *val)
+{
+ unsigned char binval;
+
+ binval = string_to_int(val);
+
+ dprintf(" = x%x", binval);
+ * (unsigned char *) var = binval;
+}
+
+/*
+ * Object:
+ * string_setf EXPORTED function
+ *
+ * This is a setf method for strings, which are
+ * just pointer-assigned.
+ * NOTE: might have to copy string into safe place
+ *
+ */
+void
+string_setf(
+ vm_offset_t var,
+ char *val)
+{
+ dprintf(" = %s", val);
+ * (char **) var = val;
+}
+
+
+/*
+ * Object:
+ * string_to_int EXPORTED function
+ *
+ * Convert a string into an integer.
+ * Understands decimal (default), octal and hex numbers.
+ * It also understands Ada-like numbers of the form
+ * #base#digits
+ * Defaults to 0 for all strange strings.
+ *
+ */
+integer_t
+string_to_int( char *s)
+{
+ char c;
+ unsigned int base = 10, d;
+ int neg = 1;
+ integer_t val = 0;
+
+ if ((s == 0) || ((c = *s++) == 0))
+ goto out;
+
+ /* skip spaces if any */
+ while ((c == ' ') || (c == '\t'))
+ c = *s++;
+
+ /* parse sign, allow more than one (frills) */
+ while (c == '-') {
+ neg = -neg;
+ c = *s++;
+ }
+
+ /* parse base specification, if any */
+ if ((c == '0') || (c == '#')) {
+ if (c == '0') {
+ c = *s++;
+			switch (c) {
+			case 'X':
+			case 'x':
+				base = 16;
+				c = *s++;
+				break;
+			case 'B':
+			case 'b':
+				base = 2;
+				c = *s++;
+				break;
+			default:
+				/*
+				 * Plain octal "0nnn": c already
+				 * holds the first digit, so do
+				 * not skip over it.
+				 */
+				base = 8;
+				break;
+			}
+		} else {
+			c = *s++;
+			while ((d = char_to_uint(c)) < base) {
+				val *= base;
+				val += d;
+				c = *s++;
+			}
+			base = val;
+			val = 0;
+			c = *s++;	/* forgiving of not '#' */
+		}
+ }
+
+ /* parse number proper */
+ while ((d = char_to_uint(c)) < base) {
+ val *= base;
+ val += d;
+ c = *s++;
+ }
+ val *= neg;
+out:
+ return val;
+}
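+
+/*
+ * [Editorial note] Worked examples: "0x1c" yields 28, "0b101"
+ * yields 5, and the Ada-like "#2#1010" yields 10 (the digits
+ * between the '#'s give the base, in decimal).
+ */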
+
+
+/*
+ * Object:
+ * char_to_uint LOCAL function
+ *
+ * Convert a character into an integer.
+ * Besides the numbers 0..9 understands all
+ * letters of the alphabet, starting at a=10.
+ * Case insensitive, returns infinity for bogus characters.
+ *
+ */
+static unsigned int
+char_to_uint(
+ register char c)
+{
+ if ((c >= '0') && (c <= '9'))
+ return c - '0';
+
+ if ((c >= 'a') && (c <= 'z'))
+ return c - 'a' + 10;
+
+ if ((c >= 'A') && (c <= 'Z'))
+ return c - 'A' + 10;
+
+ return ~0;
+}
+
+
+/*
+ * Object:
+ * machine_get_boot_flags EXPORTED function
+ *
+ * Pass up any explicit arguments to /etc/init
+ *
+ */
+char *
+machine_get_boot_flags(str)
+ char *str;
+{
+ strncpy(str, init_args, sizeof init_args);
+ return(str + strlen(str));
+}
+
+/*
+ * Object:
+ * scan_cmdline EXPORTED function
+ *
+ * Parse boot 'flags' into U*x-like cmd vector.
+ */
+static void
+scan_cmdline(
+ char *cmd,
+ int *p_argc,
+ char **argv)
+{
+ register int c, argc;
+ register char *p = cmd;
+
+ c = *p; argc = 0;
+ while (c) {
+ /* One more in the bag */
+ argv[argc++] = p;
+
+ /* look for separator */
+ while ((c != ' ') && (c != '\t') && c)
+ c = *++p;
+
+ /* terminate string */
+ *p = 0;
+
+ /* skip blanks */
+ while ((c == ' ') || (c == '\t'))
+ c = *++p;
+ }
+ *p_argc = argc;
+}
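+
+/*
+ * [Editorial note] E.g. the buffer "askme boothowto=0xc" comes
+ * back as argc == 2, argv[0] == "askme", argv[1] == "boothowto=0xc";
+ * separators are overwritten with NULs in place, so argv points
+ * into the caller's buffer.
+ */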
+
diff --git a/alpha/alpha/pcb.c b/alpha/alpha/pcb.c
new file mode 100644
index 00000000..f0d35942
--- /dev/null
+++ b/alpha/alpha/pcb.c
@@ -0,0 +1,893 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: pcb.c,v $
+ * Revision 2.4 93/05/15 19:11:18 mrt
+ * machparam.h -> machspl.h
+ *
+ * Revision 2.3 93/01/19 08:59:32 danner
+ * Horrible mess to cope with cache-coherency bugs on ADU MP.
+ * There is also some other bug at large that prevents proper
+ * cleanup of the switch functions. Sigh.
+ * [93/01/15 af]
+ *
+ * Revision 2.2 93/01/14 17:13:36 danner
+ * Added reference to documentation source(s).
+ * [92/12/16 15:15:36 af]
+ *
+ * Created.
+ * [92/12/10 14:59:58 af]
+ *
+ */
+/*
+ * File: pcb.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/92
+ *
+ * Alpha PCB management
+ *
+ * This code was derived exclusively from information available in
+ * "Alpha Architecture Reference Manual", Richard L. Sites ed.
+ * Digital Press, Burlington, MA 01803
+ * ISBN 1-55558-098-X, Order no. EY-L520E-DP
+ */
+
+#include <cpus.h>
+#include <mach_kdb.h>
+#include <mach_debug.h>
+
+#include <mach/std_types.h>
+#include <machine/machspl.h> /* spl definitions */
+#include <mach/thread_status.h>
+#include <mach/vm_param.h>
+#include <kern/mach_param.h>
+#include <kern/thread.h>
+#include <kern/zalloc.h>
+#include <kern/syscall_emulation.h>
+#include <vm/vm_page.h>
+#include <vm/pmap.h>
+#include <alpha/alpha_cpu.h>
+#include <alpha/thread.h>
+
+/*
+ * stack_attach:
+ *
+ * Attach a kernel stack to a thread.
+ */
+
+void stack_attach(thread, stack, continuation)
+ register thread_t thread;
+ register vm_offset_t stack;
+ register void (*continuation)();
+{
+ register pcb_t pcb = thread->pcb;
+
+extern boolean_t debug_verbose;
+if (debug_verbose) db_printf("attach stack %x to thread %x (%x)\n",
+ stack, thread, continuation);
+ thread->kernel_stack = stack;
+
+ /*
+ * Setup the saved kernel state to run the continuation.
+ * Point the exception link back at the exception frame.
+ */
+ pcb->mss.hw_pcb.ksp = (vm_offset_t)&STACK_MEL(stack)->tf;
+
+ STACK_MSB(stack)->pcb = pcb;
+ STACK_MKS(stack)->pc = (vm_offset_t) continuation;
+ STACK_MKS(stack)->sp = (vm_offset_t) STACK_MKS(stack);
+ STACK_MEL(stack)->eframe = &pcb->mss;
+ pcb->mss.saved_frame.saved_ps = alpha_initial_ps_value;
+ STACK_MEL(stack)->tf = pcb->mss.saved_frame;
+}
+
+/*
+ * The stack_free_list can only be accessed at splsched,
+ * because stack_alloc_try/thread_swapin operate at splsched.
+ */
+decl_simple_lock_data(,stack_free_list_lock)
+vm_offset_t stack_free_list; /* splsched only */
+unsigned int stack_free_count = 0; /* splsched only */
+unsigned int stack_free_limit = 1; /* patchable */
+
+unsigned int stack_alloc_hits = 0; /* debugging */
+unsigned int stack_alloc_misses = 0; /* debugging */
+unsigned int stack_alloc_max = 0; /* debugging */
+
+/*
+ * The next field is at the base of the stack,
+ * so the low end is left unsullied. The page
+ * field must be preserved.
+ */
+
+#define stack_next(stack) STACK_MSB(stack)->next
+
+/*
+ * stack_alloc_try:
+ *
+ * Non-blocking attempt to allocate a kernel stack.
+ * Called at splsched with the thread locked.
+ */
+
+boolean_t stack_alloc_try(thread, continuation)
+ register thread_t thread;
+ void (*continuation)();
+{
+ register vm_offset_t stack;
+
+ simple_lock(&stack_free_list_lock);
+ stack = stack_free_list;
+ if (stack != 0) {
+ stack_free_list = stack_next(stack);
+ stack_free_count--;
+ } else {
+ stack = thread->stack_privilege;
+ if (stack == 0) {
+ stack_alloc_misses++;
+ simple_unlock(&stack_free_list_lock);
+ return FALSE;
+ }
+ }
+ stack_alloc_hits++;
+ simple_unlock(&stack_free_list_lock);
+
+ stack_attach(thread, stack, continuation);
+ return TRUE;
+}
+
+/*
+ * stack_alloc:
+ *
+ * Allocate a kernel stack for a thread.
+ * May block.
+ */
+
+#define ALLOCATE_STACK_WITH_GUARD_PAGES 0
+#include <vm/vm_kern.h>
+
+void stack_alloc(thread, continuation)
+ register thread_t thread;
+ void (*continuation)();
+{
+ register vm_offset_t stack;
+ spl_t s;
+
+ /*
+ * We first try the free list. It is probably empty,
+ * or stack_alloc_try would have succeeded, but possibly
+ * a stack was freed before the swapin thread got to us.
+ */
+
+ s = splsched();
+ simple_lock(&stack_free_list_lock);
+ stack = stack_free_list;
+ if (stack != 0) {
+ stack_free_list = stack_next(stack);
+ stack_free_count--;
+ }
+ simple_unlock(&stack_free_list_lock);
+ (void) splx(s);
+
+ if (stack == 0) {
+#if ALLOCATE_STACK_WITH_GUARD_PAGES
+ vm_offset_t addr = 0;
+ kmem_alloc_aligned( kernel_map, &addr, 4 * PAGE_SIZE);
+ if (vm_protect( kernel_map, addr, PAGE_SIZE, 1, VM_PROT_READ))
+ gimmeabreak();
+ stack = addr + PAGE_SIZE;
+ if (vm_protect( kernel_map, stack + PAGE_SIZE, 2*PAGE_SIZE, 1, VM_PROT_READ))
+ gimmeabreak();
+#else
+ register vm_page_t m;
+
+ while ((m = vm_page_grab()) == VM_PAGE_NULL)
+ VM_PAGE_WAIT((void (*)()) 0);
+
+ stack = PHYS_TO_K0SEG(m->phys_addr);
+#if MACH_DEBUG
+ stack_init(stack);
+#endif /* MACH_DEBUG */
+ STACK_MSB(stack)->page = m;
+#endif /* ALLOCATE_STACK_WITH_GUARD_PAGES */
+ }
+
+ stack_attach(thread, stack, continuation);
+}
+
+/*
+ * stack_free:
+ *
+ * Free a thread's kernel stack.
+ * Called at splsched with the thread locked.
+ */
+
+void stack_free(thread)
+ register thread_t thread;
+{
+ register vm_offset_t stack;
+
+ stack = thread->kernel_stack;
+
+ if (stack != thread->stack_privilege) {
+ simple_lock(&stack_free_list_lock);
+ stack_next(stack) = stack_free_list;
+ stack_free_list = stack;
+ if (++stack_free_count > stack_alloc_max)
+ stack_alloc_max = stack_free_count;
+ simple_unlock(&stack_free_list_lock);
+ }
+}
+
+/*
+ * stack_collect:
+ *
+ * Free excess kernel stacks.
+ * May block.
+ */
+
+void stack_collect()
+{
+ extern vm_page_t vm_page_array;
+ extern int first_page;
+
+ register vm_offset_t stack;
+ spl_t s;
+
+#if ALLOCATE_STACK_WITH_GUARD_PAGES
+#else
+ s = splsched();
+ simple_lock(&stack_free_list_lock);
+ while (stack_free_count > stack_free_limit) {
+ stack = stack_free_list;
+ stack_free_list = stack_next(stack);
+ stack_free_count--;
+ simple_unlock(&stack_free_list_lock);
+ (void) splx(s);
+
+#if MACH_DEBUG
+ stack_finalize(stack);
+#endif /* MACH_DEBUG */
+ vm_page_release(STACK_MSB(stack)->page);
+
+ s = splsched();
+ simple_lock(&stack_free_list_lock);
+ }
+ simple_unlock(&stack_free_list_lock);
+ (void) splx(s);
+#endif /* ALLOCATE_STACK_WITH_GUARD_PAGES */
+}
+
+#if MACH_DEBUG
+extern boolean_t stack_check_usage;
+
+/*
+ * stack_statistics:
+ *
+ * Return statistics on cached kernel stacks
+ * kept by this machine-dependent module.
+ * *maxusagep must be initialized by the caller.
+ */
+
+void stack_statistics(totalp, maxusagep)
+ unsigned int *totalp;
+ vm_size_t *maxusagep;
+{
+ spl_t s;
+
+ s = splsched();
+ if (stack_check_usage) {
+ register vm_offset_t stack;
+
+ /*
+ * This is pretty expensive to do at splsched,
+ * but it only happens when someone makes
+ * a debugging call, so it should be OK.
+ */
+
+ simple_lock(&stack_free_list_lock);
+ for (stack = stack_free_list;
+ stack != 0;
+ stack = stack_next(stack)) {
+ vm_size_t usage = stack_usage(stack);
+
+ if (usage > *maxusagep)
+ *maxusagep = usage;
+ }
+ simple_unlock(&stack_free_list_lock);
+ }
+
+ *totalp = stack_free_count;
+ (void) splx(s);
+}
+#endif /* MACH_DEBUG */
+
+/* Cannot optimize this because multiP */
+static
+unload_fpa(pcb)
+ pcb_t pcb;
+{
+ register struct alpha_float_state *mfs;
+
+ mfs = pcb->mms.mfs;
+ /* Do we have state and did we use it this time around */
+ if ((vm_offset_t)mfs & 1) {
+ mfs = (struct alpha_float_state *)((vm_offset_t)mfs & ~1);
+ pcb->mms.mfs = mfs;
+ alpha_fpa_unload(mfs); /* leaves fpa disabled */
+ }
+}
+
+long show_handoff, show_ustack;
+
+extern thread_t Switch_context();
+
+thread_t switch_context(old, continuation, new)
+ register thread_t old;
+ void (*continuation)();
+ register thread_t new;
+{
+ task_t old_task, new_task;
+int mycpu = cpu_number();
+
+ unload_fpa(old->pcb);
+
+tbia(); alphacache_Iflush();
+if (show_handoff) db_printf("[%d]switch(%x -> %x) %x\n", mycpu,
+ old, new, continuation);
+if (show_ustack) db_printf("[%d]USP %x (%x) -> %x \n", mycpu,
+ old->pcb->mss.hw_pcb.usp, mfpr_usp(),
+ new->pcb->mss.hw_pcb.usp);
+
+ if ((old_task = old->task) == (new_task = new->task)) {
+ register pcb_t pcb = new->pcb;
+if (new->pcb->mss.hw_pcb.ptbr != old->pcb->mss.hw_pcb.ptbr) gimmeabreak();
+if (vm_map_pmap(new_task->map)->pid < 0) gimmeabreak();
+ pcb->mss.hw_pcb.asn = vm_map_pmap(new_task->map)->pid;
+ swpctxt(kvtophys((vm_offset_t) pcb), &(pcb)->mss.hw_pcb.ksp);
+ } else {
+ PMAP_DEACTIVATE_USER(vm_map_pmap(old_task->map),
+ old, cpu_number());
+ PMAP_ACTIVATE_USER(vm_map_pmap(new_task->map),
+ new, cpu_number());
+ }
+
+ return Switch_context(old, continuation, new);
+}
+
+#if 1
+void
+stack_handoff(old,new)
+ thread_t old,new;
+{
+ task_t old_task, new_task;
+int mycpu = cpu_number();
+
+ unload_fpa(old->pcb);
+tbia(); alphacache_Iflush();
+if (show_handoff) db_printf("[%d]handoff(%x -> %x)\n", mycpu, old, new);
+if (show_ustack) db_printf("[%d]USP %x (%x) -> %x \n", mycpu,
+ old->pcb->mss.hw_pcb.usp, mfpr_usp(),
+ new->pcb->mss.hw_pcb.usp);
+
+ old_task = old->task;
+ new_task = new->task;
+ if (old_task != new_task) {
+#if NCPUS>1
+ register int my_cpu = cpu_number();
+
+ PMAP_DEACTIVATE_USER(vm_map_pmap(old_task->map),
+ old, my_cpu);
+ PMAP_ACTIVATE_USER(vm_map_pmap(new_task->map),
+ new, my_cpu);
+#else
+ vm_map_pmap(old_task->map)->cpus_using = FALSE;
+ vm_map_pmap(new_task->map)->cpus_using = TRUE;
+#endif
+ }
+
+#if 1
+ new->kernel_stack = current_stack();
+ old->kernel_stack = 0;
+#endif
+
+ Stack_handoff(old,new,old_task,new_task);
+}
+#endif
+
+decl_simple_lock_data(,pcb_free_list_lock)
+pcb_t pcb_free_list; /* list of unused pcb structures */
+unsigned int pcb_free_count; /* size of the list, for debugging */
+unsigned int pcb_wasted_count; /* number of unusable pcbs allocated */
+zone_t pcb_zone; /* used when free list is empty */
+
+zone_t mfs_zone;
+zone_t msss_zone;
+
+void pcb_module_init()
+{
+ simple_lock_init(&stack_free_list_lock);
+ simple_lock_init(&pcb_free_list_lock);
+
+ pcb_zone = zinit(sizeof(struct pcb),
+ THREAD_MAX * sizeof(struct pcb),
+ THREAD_CHUNK * sizeof(struct pcb),
+ FALSE, "alpha pcb state");
+ zcollectable(pcb_zone); /* sigh, alignment */
+
+ mfs_zone = zinit(sizeof(struct alpha_float_state),
+ THREAD_MAX * sizeof(struct alpha_float_state),
+ THREAD_CHUNK * sizeof(struct alpha_float_state),
+ FALSE, "alpha float state");
+
+ msss_zone = zinit(sizeof(struct alpha_sstep_state),
+ THREAD_MAX * sizeof(struct alpha_sstep_state),
+ THREAD_CHUNK * sizeof(struct alpha_sstep_state),
+ FALSE, "alpha sstep state");
+}
+
+pcb_t pcb_alloc()
+{
+ pcb_t pcb;
+
+ simple_lock(&pcb_free_list_lock);
+ if (pcb = pcb_free_list) {
+ pcb_free_list = * (pcb_t *) pcb;
+ pcb_free_count--;
+ }
+ simple_unlock(&pcb_free_list_lock);
+
+ if (pcb)
+ return pcb;
+
+ for (;;) {
+ vm_offset_t va, pa;
+
+ /*
+ * We use the zone package to get more pcbs.
+ * This is tricky, because we need k0seg memory.
+ */
+
+ va = zalloc(pcb_zone);
+ /* must be aligned or else */
+ if (va & (sizeof(struct pcb)-1))
+ panic("pcb_alloc");
+ if (ISA_K0SEG(va))
+ return (pcb_t) va;
+
+ /*
+ * We can convert the virtual address to a
+ * physical address, if the pcb lies entirely in
+ * one physical page. It should.
+ */
+
+ if (alpha_trunc_page(va) + ALPHA_PGBYTES ==
+ alpha_round_page((vm_offset_t) ((pcb_t) va + 1))) {
+ pa = pmap_resident_extract(kernel_pmap, va);
+ if (pa == 0)
+ panic("pcb_alloc", va);
+
+ return (pcb_t) PHYS_TO_K0SEG(pa);
+ }
+
+ /*
+ * Discard this pcb and try again.
+ */
+
+ pcb_wasted_count++;
+ }
+}
+
+void pcb_free(pcb)
+ pcb_t pcb;
+{
+ simple_lock(&pcb_free_list_lock);
+ pcb_free_count++;
+ * (pcb_t *) pcb = pcb_free_list;
+ pcb_free_list = pcb;
+ simple_unlock(&pcb_free_list_lock);
+}
+
+/*
+ * pcb_init:
+ *
+ * Initialize the pcb for a thread. For Alpha,
+ * also (lazily) initialize the FPA state.
+ *
+ */
+void pcb_init(thread)
+ register thread_t thread;
+{
+ register pcb_t pcb;
+
+ /*
+ * Allocate a pcb.
+ */
+ pcb = pcb_alloc();
+ thread->pcb = pcb;
+
+ /*
+ * We can't let the user see random values
+ * in his registers. They might not be so random.
+ */
+ bzero(pcb, sizeof *pcb);
+
+ pcb->mss.framep = &pcb->mss.saved_frame;
+ set_ptbr(thread->task->map->pmap, pcb, FALSE);
+
+ /*
+ * Make the thread run in user mode,
+ * if it ever comes back out of the kernel.
+ * This is done in thread_bootstrap_return().
+ */
+
+ /*
+ * Space for floating point and single-stepping state
+ * will be allocated as needed.
+ */
+}
+
+void pcb_fpa_init(thread)
+ register thread_t thread;
+{
+ register pcb_t pcb = thread->pcb;
+
+ pcb->mms.mfs = (struct alpha_float_state *) zalloc(mfs_zone);
+
+ /*
+ * We can't let the user see random values
+ * in his registers. They might not be so random.
+ */
+ bzero(pcb->mms.mfs, sizeof *pcb->mms.mfs);
+}
+
+/*
+ * pcb_terminate:
+ *
+ * Shutdown any state associated with a thread's pcb.
+ * Also, release any coprocessor(s) register state.
+ */
+void pcb_terminate(thread)
+ register struct thread *thread;
+{
+ register pcb_t pcb = thread->pcb;
+
+ if (pcb->mms.mfs != 0)
+ /* fpa disabled at ctxt switch time */
+ zfree(mfs_zone, (vm_offset_t)pcb->mms.mfs & ~1);
+
+ if (pcb->mms.msss != 0)
+ zfree(msss_zone, (vm_offset_t) pcb->mms.msss);
+ pcb_free(pcb);
+ thread->pcb = 0;
+}
+
+/*
+ * pcb_collect:
+ *
+ * Attempt to free excess pcb memory.
+ */
+
+void pcb_collect(thread)
+ thread_t thread;
+{
+}
+
+/*
+ * syscall_emulation_sync:
+ *
+ * The task's emulation vector just changed.
+ * Perform any necessary synchronization.
+ */
+
+extern struct eml_dispatch *current_dispatch[];
+
+void syscall_emulation_sync(task)
+ task_t task;
+{
+ if (task == current_task())
+ current_dispatch[cpu_number()] = task->eml_dispatch;
+}
+
+/*
+ * thread_setstatus:
+ *
+ * Set the status of the given thread.
+ */
+
+kern_return_t thread_setstatus(thread, flavor, tstate, count)
+ thread_t thread;
+ int flavor;
+ thread_state_t tstate;
+ natural_t count;
+{
+ switch (flavor) {
+ case ALPHA_THREAD_STATE: {
+ register struct alpha_saved_state *mss;
+ register struct alpha_thread_state *mts;
+
+ if (count != ALPHA_THREAD_STATE_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+ mss = & USER_REGS(thread)->mss;
+ mts = (struct alpha_thread_state *) tstate;
+
+ mss->v0 = mts->r0;
+ mss->t0 = mts->r1;
+ mss->saved_frame.saved_r2 = mts->r2;
+ mss->saved_frame.saved_r3 = mts->r3;
+ mss->saved_frame.saved_r4 = mts->r4;
+ mss->saved_frame.saved_r5 = mts->r5;
+ mss->saved_frame.saved_r6 = mts->r6;
+ mss->saved_frame.saved_r7 = mts->r7;
+ mss->t7 = mts->r8;
+ mss->s0 = mts->r9;
+ mss->s1 = mts->r10;
+ mss->s2 = mts->r11;
+ mss->s3 = mts->r12;
+ mss->s4 = mts->r13;
+ mss->s5 = mts->r14;
+ mss->s6 = mts->r15;
+ mss->a0 = mts->r16;
+ mss->a1 = mts->r17;
+ mss->a2 = mts->r18;
+ mss->a3 = mts->r19;
+ mss->a4 = mts->r20;
+ mss->a5 = mts->r21;
+ mss->t8 = mts->r22;
+ mss->t9 = mts->r23;
+ mss->t10 = mts->r24;
+ mss->t11 = mts->r25;
+ mss->ra = mts->r26;
+ mss->t12 = mts->r27;
+ mss->at = mts->r28;
+ mss->gp = mts->r29;
+ mss->sp = mts->r30;
+ mss->hw_pcb.usp = mts->r30;
+/* XXXX if thread == current_thread mtpr_usp */
+ mss->saved_frame.saved_pc = mts->pc;
+ break;
+ }
+
+ case ALPHA_FLOAT_STATE: {
+ register pcb_t pcb;
+ register struct alpha_float_state *mfs;
+
+ if (count != ALPHA_FLOAT_STATE_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+ pcb = thread->pcb;
+ mfs = (struct alpha_float_state *) tstate;
+
+ if (pcb->mms.mfs == 0)
+ pcb->mms.mfs = (struct alpha_float_state *)
+ zalloc(mfs_zone);
+
+ bcopy(mfs, pcb->mms.mfs, sizeof *mfs);
+
+ break;
+ }
+
+ case ALPHA_EXC_STATE: {
+ register pcb_t pcb;
+ register struct alpha_exc_state *mes;
+
+ if (count != ALPHA_EXC_STATE_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+ mes = (struct alpha_exc_state *) tstate;
+
+ if (mes->cause != ALPHA_EXC_SET_SSTEP)
+ return(KERN_INVALID_ARGUMENT);
+
+ pcb = thread->pcb;
+ if (pcb->mms.msss == 0) {
+ pcb->mms.msss = (struct alpha_sstep_state *)
+ zalloc(msss_zone);
+ pcb->mms.msss->ss_count = 0;
+ }
+ break;
+ }
+
+ default:
+ return(KERN_INVALID_ARGUMENT);
+ }
+
+ return(KERN_SUCCESS);
+
+}
+
+/*
+ * thread_getstatus:
+ *
+ * Get the status of the specified thread.
+ */
+
+kern_return_t thread_getstatus(thread, flavor, tstate, count)
+ register thread_t thread;
+ int flavor;
+ thread_state_t tstate; /* pointer to OUT array */
+ natural_t *count; /* IN/OUT */
+{
+ switch (flavor) {
+ case THREAD_STATE_FLAVOR_LIST:
+ if (*count < 3)
+ return(KERN_INVALID_ARGUMENT);
+
+ tstate[0] = ALPHA_THREAD_STATE;
+ tstate[1] = ALPHA_FLOAT_STATE;
+ tstate[2] = ALPHA_EXC_STATE;
+
+ *count = 3;
+ break;
+
+ case ALPHA_THREAD_STATE: {
+ register struct alpha_saved_state *mss;
+ register struct alpha_thread_state *mts;
+
+ if (*count < ALPHA_THREAD_STATE_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+ mss = & USER_REGS(thread)->mss;
+ mts = (struct alpha_thread_state *) tstate;
+
+ mts->r0 = mss->v0;
+ mts->r1 = mss->t0;
+ mts->r2 = mss->saved_frame.saved_r2;
+ mts->r3 = mss->saved_frame.saved_r3;
+ mts->r4 = mss->saved_frame.saved_r4;
+ mts->r5 = mss->saved_frame.saved_r5;
+ mts->r6 = mss->saved_frame.saved_r6;
+ mts->r7 = mss->saved_frame.saved_r7;
+ mts->r8 = mss->t7;
+ mts->r9 = mss->s0;
+ mts->r10 = mss->s1;
+ mts->r11 = mss->s2;
+ mts->r12 = mss->s3;
+ mts->r13 = mss->s4;
+ mts->r14 = mss->s5;
+ mts->r15 = mss->s6;
+ mts->r16 = mss->a0;
+ mts->r17 = mss->a1;
+ mts->r18 = mss->a2;
+ mts->r19 = mss->a3;
+ mts->r20 = mss->a4;
+ mts->r21 = mss->a5;
+ mts->r22 = mss->t8;
+ mts->r23 = mss->t9;
+ mts->r24 = mss->t10;
+ mts->r25 = mss->t11;
+ mts->r26 = mss->ra;
+ mts->r27 = mss->t12;
+ mts->r28 = mss->at;
+ mts->r29 = mss->gp;
+ mts->r30 = mss->hw_pcb.usp;
+ mts->pc = mss->saved_frame.saved_pc;
+
+ *count = ALPHA_THREAD_STATE_COUNT;
+ break;
+ }
+
+ case ALPHA_FLOAT_STATE: {
+ register pcb_t pcb;
+ register struct alpha_float_state *mfs;
+
+ if (*count < ALPHA_FLOAT_STATE_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+ pcb = thread->pcb;
+ mfs = (struct alpha_float_state *) tstate;
+
+ if (pcb->mms.mfs) {
+ /* fpa state dumped at ctxt switch time */
+ bcopy(pcb->mms.mfs, mfs, sizeof *mfs);
+ } else
+ bzero(mfs, sizeof *mfs);
+
+ *count = ALPHA_FLOAT_STATE_COUNT;
+ break;
+ }
+
+ case ALPHA_EXC_STATE: {
+ register struct alpha_saved_state *mss;
+ register struct alpha_exc_state *mes;
+
+ if (*count < ALPHA_EXC_STATE_COUNT)
+ return(KERN_INVALID_ARGUMENT);
+
+ mss = & USER_REGS(thread)->mss;
+ mes = (struct alpha_exc_state *) tstate;
+
+ mes->cause = mss->cause;
+ mes->address = mss->bad_address;
+ mes->used_fpa = (thread->pcb->mms.mfs != 0);
+
+ *count = ALPHA_EXC_STATE_COUNT;
+ break;
+ }
+
+ default:
+ return(KERN_INVALID_ARGUMENT);
+ }
+ return(KERN_SUCCESS);
+}
+
+/*
+ * Alter the thread's state so that a following thread_exception_return
+ * will make the thread return 'retval' from a syscall.
+ */
+void
+thread_set_syscall_return(thread, retval)
+ thread_t thread;
+ kern_return_t retval;
+{
+#if MACH_KDB
+ extern boolean_t syscalltrace;
+ if (syscalltrace) db_printf("-> %x\n", retval);
+#endif
+ USER_REGS(thread)->mss.v0 = retval;
+}
+
+
+/*
+ * Return preferred address of user stack.
+ * Always returns low address. If stack grows up,
+ * the stack grows away from this address;
+ * if stack grows down, the stack grows towards this
+ * address.
+ */
+vm_offset_t
+user_stack_low(stack_size)
+ vm_size_t stack_size;
+{
+ return (VM_MAX_ADDRESS - stack_size);
+}
+
+/*
+ * Allocate argument area and set registers for first user thread.
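+ *	(entry[0] is taken as the initial pc, entry[1] as the gp,
+ *	as the assignments below assume.)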
+ */
+vm_offset_t
+set_user_regs(stack_base, stack_size, entry, arg_size)
+ vm_offset_t stack_base; /* low address */
+ vm_offset_t stack_size;
+ vm_offset_t *entry;
+ vm_size_t arg_size;
+{
+ vm_offset_t arg_addr;
+ register struct alpha_saved_state *saved_state;
+
+ arg_size = (arg_size + sizeof(vm_size_t) - 1) & ~(sizeof(vm_size_t)-1);
+ arg_addr = stack_base + stack_size - arg_size;
+
+ saved_state = & USER_REGS(current_thread())->mss;
+ saved_state->saved_frame.saved_pc = entry[0];
+ saved_state->framep->saved_pc = entry[0];
+ saved_state->gp = entry[1];
+ saved_state->sp = arg_addr;
+ saved_state->hw_pcb.usp = arg_addr;
+
+ return (arg_addr);
+}
diff --git a/alpha/alpha/pmap.c b/alpha/alpha/pmap.c
new file mode 100644
index 00000000..56d753a6
--- /dev/null
+++ b/alpha/alpha/pmap.c
@@ -0,0 +1,2791 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: pmap.c,v $
+ * Revision 2.4 93/05/15 19:11:31 mrt
+ * machparam.h -> machspl.h
+ *
+ * Revision 2.3 93/03/09 10:50:31 danner
+ * GCC quiets.
+ * [93/03/05 af]
+ *
+ * Revision 2.2 93/02/05 07:59:42 danner
+ * Added pmap_set_modify, for some smart device driver.
+ * [93/02/04 00:51:56 af]
+ *
+ * Fixed locking bug: at interrupt time one should not call
+ * pmap_extract() ! Made kvtophys() call the safer one.
+ * [93/01/15 af]
+ * Added reference to documentation source(s).
+ * [92/12/16 15:17:39 af]
+ *
+ * Support for new MI multiP ddb. Code probably belongs elsewhere.
+ * [92/12/16 12:45:52 af]
+ *
+ * Created, starting from i386 version and extending it with
+ * one more layer in the pagetable tree.
+ * [92/12/10 15:02:31 af]
+ *
+ */
+
+/*
+ * File: pmap.c
+ *
+ * Author list
+ * vax: Avadis Tevanian, Jr., Michael Wayne Young
+ * i386: Lance Berc, Mike Kupfer, Bob Baron, David Golub, Richard Draves
+ * alpha: Alessandro Forin
+ *
+ * Physical Map management code for DEC Alpha
+ *
+ * Manages physical address maps.
+ *
+ * This code was derived exclusively from information available in
+ * "Alpha Architecture Reference Manual", Richard L. Sites ed.
+ * Digital Press, Burlington, MA 01803
+ * ISBN 1-55558-098-X, Order no. EY-L520E-DP
+ */
+/*
+ * In addition to hardware address maps, this
+ * module is called upon to provide software-use-only
+ * maps which may or may not be stored in the same
+ * form as hardware maps. These pseudo-maps are
+ * used to store intermediate results from copy
+ * operations to and from address spaces.
+ *
+ * Since the information managed by this module is
+ * also stored by the logical address mapping module,
+ * this module may throw away valid virtual-to-physical
+ * mappings at almost any time. However, invalidations
+ * of virtual-to-physical mappings must be done as
+ * requested.
+ *
+ * In order to cope with hardware architectures which
+ * make virtual-to-physical map invalidates expensive,
+ *	this module may delay invalidation or protection-reduction
+ *	operations until they are actually
+ * necessary. This module is given full information as
+ * to which processors are currently using which maps,
+ * and to when physical maps must be made correct.
+ */
+
+#include <cpus.h>
+#include <mach_kdb.h>
+
+#include <mach/std_types.h>
+
+#include <kern/lock.h>
+
+#include <kern/thread.h>
+#include <kern/zalloc.h>
+#include <machine/machspl.h>
+
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_kern.h>
+#include <mach/vm_param.h>
+#include <mach/vm_prot.h>
+#include <vm/vm_object.h>
+#include <vm/vm_page.h>
+#include <vm/vm_user.h>
+#include <mach/machine/vm_param.h>
+#include <machine/thread.h>
+
+#include <alpha/alpha_scb.h>
+
+#define roundup(x,s) (((x) + ((s)-1)) & ~((s)-1))
+
+/* For external use... */
+vm_offset_t kvtophys(vm_offset_t virt)
+{
+ vm_offset_t pmap_resident_extract();
+ return pmap_resident_extract(kernel_pmap, virt);
+}
+
+/* ..but for internal use... */
+#define phystokv(a) PHYS_TO_K0SEG(a)
+#define kvtophys(p) K0SEG_TO_PHYS(p)
+
+
+/*
+ * Private data structures.
+ */
+/*
+ * Map from MI protection codes to MD codes.
+ * Assume that there are three MI protection codes, all using low bits.
+ */
+unsigned int user_protection_codes[8];
+unsigned int kernel_protection_codes[8];
+
+alpha_protection_init()
+{
+ register unsigned int *kp, *up, prot;
+
+ kp = kernel_protection_codes;
+ up = user_protection_codes;
+ for (prot = 0; prot < 8; prot++) {
+ switch (prot) {
+ case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
+ *kp++ = 0;
+ *up++ = 0;
+ break;
+ case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
+ case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
+ case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
+ *kp++ = ALPHA_PTE_KR;
+ *up++ = ALPHA_PTE_UR|ALPHA_PTE_KR;
+ break;
+ case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
+ *kp++ = ALPHA_PTE_KW;
+ *up++ = ALPHA_PTE_UW|ALPHA_PTE_KW;
+ break;
+ case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
+ case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
+ case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
+ *kp++ = ALPHA_PTE_KW|ALPHA_PTE_KR;
+ *up++ = ALPHA_PTE_UW|ALPHA_PTE_UR|ALPHA_PTE_KW|ALPHA_PTE_KR;
+ break;
+ }
+ }
+}
+
+/*
+ * Given a map and a machine independent protection code,
+ * convert to a alpha protection code.
+ */
+
+#define alpha_protection(map, prot) \
+ (((map) == kernel_pmap) ? kernel_protection_codes[prot] : \
+ user_protection_codes[prot])
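+
+/*
+ * Example: alpha_protection(kernel_pmap, VM_PROT_READ) yields
+ * ALPHA_PTE_KR; the same request against a user pmap yields
+ * ALPHA_PTE_UR|ALPHA_PTE_KR, since the kernel can read any page
+ * a user can.
+ */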
+
+/* Build the typical kernel pte */
+#define pte_ktemplate(t,pa,pr) \
+MACRO_BEGIN \
+ (t) = pa_to_pte(pa) | ALPHA_PTE_VALID | ALPHA_PTE_GLOBAL | \
+ (alpha_protection(kernel_pmap,pr) << ALPHA_PTE_PROTOFF); \
+MACRO_END
+
+/* build the typical pte */
+#define pte_template(m,t,pa,pr) \
+MACRO_BEGIN \
+ (t) = pa_to_pte(pa) | ALPHA_PTE_VALID | \
+ (alpha_protection(m,pr) << ALPHA_PTE_PROTOFF); \
+MACRO_END
+
+/*
+ * For each vm_page_t, there is a list of all currently
+ * valid virtual mappings of that page. An entry is
+ * a pv_entry_t; the list is the pv_table.
+ */
+
+typedef struct pv_entry {
+ struct pv_entry *next; /* next pv_entry */
+ pmap_t pmap; /* pmap where mapping lies */
+ vm_offset_t va; /* virtual address for mapping */
+} *pv_entry_t;
+
+#define PV_ENTRY_NULL ((pv_entry_t) 0)
+
+pv_entry_t pv_head_table; /* array of entries, one per page */
+
+/*
+ * pv_list entries are kept on a list that can only be accessed
+ * with the pmap system locked (at SPLVM, not in the cpus_active set).
+ * The list is refilled from the pv_list_zone if it becomes empty.
+ */
+pv_entry_t pv_free_list; /* free list at SPLVM */
+decl_simple_lock_data(, pv_free_list_lock)
+
+#define PV_ALLOC(pv_e) { \
+ simple_lock(&pv_free_list_lock); \
+ if ((pv_e = pv_free_list) != 0) { \
+ pv_free_list = pv_e->next; \
+ } \
+ simple_unlock(&pv_free_list_lock); \
+}
+
+#define PV_FREE(pv_e) { \
+ simple_lock(&pv_free_list_lock); \
+ pv_e->next = pv_free_list; \
+ pv_free_list = pv_e; \
+ simple_unlock(&pv_free_list_lock); \
+}
+
+zone_t pv_list_zone; /* zone of pv_entry structures */
+
+/*
+ * Each entry in the pv_head_table is locked by a bit in the
+ * pv_lock_table. The lock bits are accessed by the physical
+ * address of the page they lock.
+ */
+
+char *pv_lock_table; /* pointer to array of bits */
+#define pv_lock_table_size(n) (((n)+BYTE_SIZE-1)/BYTE_SIZE)
+
+/*
+ * First and last physical addresses that we maintain any information
+ * for. Initialized to zero so that pmap operations done before
+ * pmap_init won't touch any non-existent structures.
+ */
+vm_offset_t vm_first_phys = (vm_offset_t) 0;
+vm_offset_t vm_last_phys = (vm_offset_t) 0;
+boolean_t pmap_initialized = FALSE;/* Has pmap_init completed? */
+
+/*
+ * Index into pv_head table, its lock bits, and the modify/reference
+ * bits starting at vm_first_phys.
+ */
+
+#define pa_index(pa) (atop(pa - vm_first_phys))
+
+#define pai_to_pvh(pai) (&pv_head_table[pai])
+#define lock_pvh_pai(pai) (bit_lock(pai, pv_lock_table))
+#define unlock_pvh_pai(pai) (bit_unlock(pai, pv_lock_table))
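+
+#if 0
+/*
+ * Illustrative sketch, compiled out: how the tables above let one
+ * visit every mapping of a managed physical page.  The function
+ * name is hypothetical; the real walkers are pmap_page_protect()
+ * and the phys_attribute routines below.
+ */
+void pv_walk_example(pa)
+	vm_offset_t pa;			/* assumed managed */
+{
+	register pv_entry_t pv_e;
+	int pai;
+
+	pai = pa_index(pa);
+	lock_pvh_pai(pai);
+	pv_e = pai_to_pvh(pai);
+	if (pv_e->pmap != PMAP_NULL)	/* head entry in use? */
+		for (; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) {
+			/* pv_e->pmap maps pa at va pv_e->va */
+		}
+	unlock_pvh_pai(pai);
+}
+#endif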
+
+/*
+ * Array of physical page attributes for managed pages.
+ * One byte per physical page.
+ */
+char *pmap_phys_attributes;
+
+/*
+ * Physical page attributes. Copy bits from PTE.
+ */
+#define PHYS_MODIFIED (ALPHA_PTE_MOD>>16) /* page modified */
+#define PHYS_REFERENCED (ALPHA_PTE_REF>>16) /* page referenced */
+
+#define pte_get_attributes(p) ((*p & (ALPHA_PTE_MOD|ALPHA_PTE_REF)) >> 16)
+
+/*
+ * Amount of virtual memory mapped by one
+ * page-directory entry.
+ */
+#define PDE_MAPPED_SIZE (pdetova(1))
+#define PDE2_MAPPED_SIZE (pde2tova(1))
+#define PDE3_MAPPED_SIZE (pde3tova(1))
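+
+/*
+ * Worked example (assuming the usual 8K hardware page and 8-byte
+ * PTEs, i.e. 1024 entries per page-table page): PDE3_MAPPED_SIZE
+ * is 1 << 13 (one page), PDE2_MAPPED_SIZE is 1 << 23 (one seg3
+ * page maps 8 meg), and PDE_MAPPED_SIZE is 1 << 33 (one seg2 page
+ * maps 8 gig).  These are the strides that pmap_remove() and
+ * pmap_iterate_lev2() iterate by, below.
+ */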
+
+/*
+ * We allocate page table pages directly from the VM system
+ * through this object. It maps physical memory.
+ */
+vm_object_t pmap_object = VM_OBJECT_NULL;
+
+/*
+ * Locking and TLB invalidation
+ */
+
+/*
+ * Locking Protocols:
+ *
+ * There are two structures in the pmap module that need locking:
+ * the pmaps themselves, and the per-page pv_lists (which are locked
+ * by locking the pv_lock_table entry that corresponds to the pv_head
+ * for the list in question.) Most routines want to lock a pmap and
+ * then do operations in it that require pv_list locking -- however
+ * pmap_remove_all and pmap_copy_on_write operate on a physical page
+ * basis and want to do the locking in the reverse order, i.e. lock
+ * a pv_list and then go through all the pmaps referenced by that list.
+ * To protect against deadlock between these two cases, the pmap_lock
+ * is used. There are three different locking protocols as a result:
+ *
+ * 1. pmap operations only (pmap_extract, pmap_access, ...) Lock only
+ * the pmap.
+ *
+ * 2. pmap-based operations (pmap_enter, pmap_remove, ...) Get a read
+ * lock on the pmap_lock (shared read), then lock the pmap
+ * and finally the pv_lists as needed [i.e. pmap lock before
+ * pv_list lock.]
+ *
+ * 3. pv_list-based operations (pmap_remove_all, pmap_copy_on_write, ...)
+ * Get a write lock on the pmap_lock (exclusive write); this
+ *		also guarantees exclusive access to the pv_lists.  Lock the
+ * pmaps as needed.
+ *
+ * At no time may any routine hold more than one pmap lock or more than
+ * one pv_list lock. Because interrupt level routines can allocate
+ * mbufs and cause pmap_enter's, the pmap_lock and the lock on the
+ * kernel_pmap can only be held at splvm.
+ */
+
+#if NCPUS > 1
+/*
+ * We raise the interrupt level to splvm, to block interprocessor
+ * interrupts during pmap operations. We must take the CPU out of
+ * the cpus_active set while interrupts are blocked.
+ */
+#define SPLVM(spl) { \
+ spl = splvm(); \
+ i_bit_clear(cpu_number(), &cpus_active); \
+}
+
+#define SPLX(spl) { \
+ i_bit_set(cpu_number(), &cpus_active); \
+ splx(spl); \
+}
+
+/*
+ * Lock on pmap system
+ */
+lock_data_t pmap_system_lock;
+
+volatile boolean_t cpu_update_needed[NCPUS];
+
+#define PMAP_READ_LOCK(pmap, spl) { \
+ SPLVM(spl); \
+ lock_read(&pmap_system_lock); \
+ simple_lock(&(pmap)->lock); \
+}
+
+#define PMAP_WRITE_LOCK(spl) { \
+ SPLVM(spl); \
+ lock_write(&pmap_system_lock); \
+}
+
+#define PMAP_READ_UNLOCK(pmap, spl) { \
+ simple_unlock(&(pmap)->lock); \
+ lock_read_done(&pmap_system_lock); \
+ SPLX(spl); \
+}
+
+#define PMAP_WRITE_UNLOCK(spl) { \
+ lock_write_done(&pmap_system_lock); \
+ SPLX(spl); \
+}
+
+#define PMAP_WRITE_TO_READ_LOCK(pmap) { \
+ simple_lock(&(pmap)->lock); \
+ lock_write_to_read(&pmap_system_lock); \
+}
+
+#define LOCK_PVH(index) (lock_pvh_pai(index))
+
+#define UNLOCK_PVH(index) (unlock_pvh_pai(index))
+
+#define PMAP_UPDATE_TLBS(pmap, s, e) \
+{ \
+ cpu_set cpu_mask = 1 << cpu_number(); \
+ cpu_set users; \
+ \
+ /* Since the pmap is locked, other updates are locked */ \
+ /* out, and any pmap_activate has finished. */ \
+ \
+ /* find other cpus using the pmap */ \
+ users = (pmap)->cpus_using & ~cpu_mask; \
+ if (users) { \
+ /* signal them, and wait for them to finish */ \
+ /* using the pmap */ \
+ signal_cpus(users, (pmap), (s), (e)); \
+ while ((pmap)->cpus_using & cpus_active & ~cpu_mask) \
+ continue; \
+ } \
+ \
+ /* invalidate our own TLB if pmap is in use */ \
+ if ((pmap)->cpus_using & cpu_mask) { \
+ INVALIDATE_TLB((s), (e)); \
+ } \
+}
+
+#else	/* NCPUS > 1 */
+
+#define SPLVM(spl)
+#define SPLX(spl)
+
+#define PMAP_READ_LOCK(pmap, spl) SPLVM(spl)
+#define PMAP_WRITE_LOCK(spl) SPLVM(spl)
+#define PMAP_READ_UNLOCK(pmap, spl) SPLX(spl)
+#define PMAP_WRITE_UNLOCK(spl) SPLX(spl)
+#define PMAP_WRITE_TO_READ_LOCK(pmap)
+
+#define LOCK_PVH(index)
+#define UNLOCK_PVH(index)
+
+#if 0	/* fix bug later */
+#define PMAP_UPDATE_TLBS(pmap, s, e) { \
+ /* invalidate our own TLB if pmap is in use */ \
+ if ((pmap)->cpus_using) { \
+ INVALIDATE_TLB((s), (e)); \
+ } \
+}
+#else
+#define PMAP_UPDATE_TLBS(pmap, s, e) { \
+ INVALIDATE_TLB((s), (e)); \
+}
+#endif
+
+#endif /* NCPUS > 1 */
+
+#if 0
+#define INVALIDATE_TLB(s, e) { \
+ register vm_offset_t v = s, ve = e; \
+ while (v < ve) { \
+ tbis(v); v += ALPHA_PGBYTES; \
+ } \
+}
+#else
+#define INVALIDATE_TLB(s, e) { \
+ tbia(); \
+}
+#endif
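+
+#if 0
+/*
+ * Illustrative sketch, compiled out: locking protocol 2 above as
+ * pmap_remove() and pmap_enter() apply it.  System read lock and
+ * pmap lock first, TLB shootdown next, pv_list lock last.  The
+ * function name and the managed-page assumption are examples only.
+ */
+void pmap_protocol2_example(pmap, va, pa)
+	pmap_t pmap;
+	vm_offset_t va, pa;		/* pa assumed managed */
+{
+	spl_t spl;
+	int pai;
+
+	PMAP_READ_LOCK(pmap, spl);	/* pmap_system_lock, then pmap */
+	PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
+	pai = pa_index(pa);
+	LOCK_PVH(pai);
+	/* ... update the pte and its pv_list entry here ... */
+	UNLOCK_PVH(pai);
+	PMAP_READ_UNLOCK(pmap, spl);
+}
+#endif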
+
+
+#if NCPUS > 1
+
+void pmap_update_interrupt();
+
+/*
+ * Structures to keep track of pending TLB invalidations
+ */
+
+#define UPDATE_LIST_SIZE 4
+
+struct pmap_update_item {
+ pmap_t pmap; /* pmap to invalidate */
+ vm_offset_t start; /* start address to invalidate */
+ vm_offset_t end; /* end address to invalidate */
+} ;
+
+typedef struct pmap_update_item *pmap_update_item_t;
+
+/*
+ * List of pmap updates. If the list overflows,
+ * the last entry is changed to invalidate all.
+ */
+struct pmap_update_list {
+ decl_simple_lock_data(, lock)
+ int count;
+ struct pmap_update_item item[UPDATE_LIST_SIZE];
+} ;
+typedef struct pmap_update_list *pmap_update_list_t;
+
+struct pmap_update_list cpu_update_list[NCPUS];
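+
+/*
+ * Sketch of the overflow rule noted above, as the enqueue in
+ * signal_cpus() might apply it for a new item <pmap, start, end>
+ * (illustrative only; the real code lives with signal_cpus()):
+ *
+ *	if (up->count == UPDATE_LIST_SIZE) {
+ *		up->item[UPDATE_LIST_SIZE-1].pmap  = kernel_pmap;
+ *		up->item[UPDATE_LIST_SIZE-1].start = VM_MIN_ADDRESS;
+ *		up->item[UPDATE_LIST_SIZE-1].end   = VM_MAX_KERNEL_ADDRESS;
+ *	} else {
+ *		up->item[up->count].pmap  = pmap;
+ *		up->item[up->count].start = start;
+ *		up->item[up->count].end   = end;
+ *		up->count++;
+ *	}
+ */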
+
+#endif /* NCPUS > 1 */
+
+/*
+ * Other useful macros.
+ */
+#define current_pmap() (vm_map_pmap(current_thread()->task->map))
+#define pmap_in_use(pmap, cpu) (((pmap)->cpus_using & (1 << (cpu))) != 0)
+
+struct pmap kernel_pmap_store;
+pmap_t kernel_pmap;
+
+struct zone *pmap_zone; /* zone of pmap structures */
+
+int pmap_debug = 0; /* flag for debugging prints */
+int ptes_per_vm_page; /* number of hardware ptes needed
+ to map one VM page. */
+unsigned int inuse_ptepages_count = 0; /* debugging */
+
+extern char end;
+/*
+ * Page directory for kernel.
+ */
+extern pt_entry_t root_kpdes[]; /* see start.s */
+
+void pmap_remove_range(); /* forward */
+#if NCPUS > 1
+void signal_cpus(); /* forward */
+#endif /* NCPUS > 1 */
+
+/*
+ * Given an offset and a map, compute the address of the
+ * pte. If the address is invalid with respect to the map
+ * then PT_ENTRY_NULL is returned (and the map may need to grow).
+ *
+ * This is only used internally.
+ */
+#define pmap_pde(pmap, addr) (&(pmap)->dirbase[pdenum(addr)])
+
+pt_entry_t *pmap_pte(pmap, addr)
+ register pmap_t pmap;
+ register vm_offset_t addr;
+{
+ register pt_entry_t *ptp;
+ register pt_entry_t pte;
+
+ if (pmap->dirbase == 0)
+ return(PT_ENTRY_NULL);
+ /* seg1 */
+ pte = *pmap_pde(pmap,addr);
+ if ((pte & ALPHA_PTE_VALID) == 0)
+ return(PT_ENTRY_NULL);
+ /* seg2 */
+ ptp = (pt_entry_t *)ptetokv(pte);
+ pte = ptp[pte2num(addr)];
+ if ((pte & ALPHA_PTE_VALID) == 0)
+ return(PT_ENTRY_NULL);
+ /* seg3 */
+ ptp = (pt_entry_t *)ptetokv(pte);
+ return(&ptp[pte3num(addr)]);
+
+}
+
+#define DEBUG_PTE_PAGE 1
+
+extern vm_offset_t virtual_avail, virtual_end;
+extern vm_offset_t avail_start, avail_end;
+
+/*
+ * Bootstrap the system enough to run with virtual memory.
+ * Map the kernel's code and data, and allocate the system page table.
+ * Called with mapping OFF. Page_size must already be set.
+ *
+ * Parameters:
+ * avail_start PA of first available physical page
+ * avail_end PA of last available physical page
+ * virtual_avail VA of first available page
+ * virtual_end VA of last available page
+ *
+ */
+vm_size_t pmap_kernel_vm = 5; /* each one 8 meg worth */
+
+void pmap_bootstrap()
+{
+ vm_offset_t pa;
+ pt_entry_t template;
+ pt_entry_t *pde, *pte;
+ int i;
+ extern boolean_t vm_fault_dirty_handling;
+
+
+ /*
+ * Tell VM to do mod bits for us
+ */
+ vm_fault_dirty_handling = TRUE;
+
+ alpha_protection_init();
+
+ /*
+ * Set ptes_per_vm_page for general use.
+ */
+ ptes_per_vm_page = page_size / ALPHA_PGBYTES;
+
+ /*
+ * The kernel's pmap is statically allocated so we don't
+ * have to use pmap_create, which is unlikely to work
+ * correctly at this part of the boot sequence.
+ */
+
+ kernel_pmap = &kernel_pmap_store;
+
+#if NCPUS > 1
+ lock_init(&pmap_system_lock, FALSE); /* NOT a sleep lock */
+#endif /* NCPUS > 1 */
+
+ simple_lock_init(&kernel_pmap->lock);
+
+ kernel_pmap->ref_count = 1;
+
+ /*
+ * The kernel page directory has been allocated;
+ * its virtual address is in root_kpdes.
+ *
+ * No other physical memory has been allocated.
+ */
+
+ kernel_pmap->dirbase = root_kpdes;
+
+ /*
+ * The distinguished tlbpid value of 0 is reserved for
+	 * the kernel pmap.  Initialize the tlbpid allocator,
+	 * which knows about this.
+ */
+ kernel_pmap->pid = 0;
+ pmap_tlbpid_init();
+
+#if 0
+ /*
+ * Rid of console's default mappings
+ */
+ for (pde = pmap_pde(kernel_pmap,0);
+ pde < pmap_pde(kernel_pmap,VM_MIN_KERNEL_ADDRESS);)
+ *pde++ = 0;
+
+#endif
+ /*
+ * Allocate the seg2 kernel page table entries from the front
+ * of available physical memory. Take enough to cover all of
+ * the K2SEG range. But of course one page is enough for 8Gb,
+ * and more in future chips ...
+ */
+#define enough_kseg2() (PAGE_SIZE)
+
+ pte = (pt_entry_t *) pmap_steal_memory(enough_kseg2()); /* virtual */
+ pa = kvtophys(pte); /* physical */
+ bzero(pte, enough_kseg2());
+
+#undef enough_kseg2
+
+ /*
+ * Make a note of it in the seg1 table
+ */
+
+ tbia();
+ pte_ktemplate(template,pa,VM_PROT_READ|VM_PROT_WRITE);
+ pde = pmap_pde(kernel_pmap,K2SEG_BASE);
+ i = ptes_per_vm_page;
+ do {
+ *pde++ = template;
+ pte_increment_pa(template);
+ i--;
+ } while (i > 0);
+
+ /*
+ * The kernel runs unmapped and cached (k0seg),
+ * only dynamic data are mapped in k2seg.
+ * ==> No need to map it.
+ */
+
+ /*
+	 * But don't we need some seg2 pagetables to start with?
+ */
+ pde = &pte[pte2num(K2SEG_BASE)];
+ for (i = pmap_kernel_vm; i > 0; i--) {
+ register int j;
+
+ pte = (pt_entry_t *) pmap_steal_memory(PAGE_SIZE); /* virtual */
+ pa = kvtophys(pte); /* physical */
+ pte_ktemplate(template,pa,VM_PROT_READ|VM_PROT_WRITE);
+ bzero(pte, PAGE_SIZE);
+ j = ptes_per_vm_page;
+ do {
+ *pde++ = template;
+ pte_increment_pa(template);
+ } while (--j > 0);
+ }
+
+ /*
+ * Assert kernel limits (cuz pmap_expand)
+ */
+
+ virtual_avail = round_page(K2SEG_BASE);
+ virtual_end = trunc_page(K2SEG_BASE + pde2tova(pmap_kernel_vm));
+
+ /* no console yet, so no printfs in this function */
+}
+
+pmap_rid_of_console()
+{
+ pt_entry_t *pde;
+ /*
+ * Rid of console's default mappings
+ */
+ for (pde = pmap_pde(kernel_pmap,0L);
+ pde < pmap_pde(kernel_pmap,VM_MIN_KERNEL_ADDRESS);)
+ *pde++ = 0;
+}
+
+/*
+ *	Map I/O space before the pmap system is fully working.
+ * pmap_bootstrap() must have been called already.
+ */
+vm_offset_t
+pmap_map_io(phys, size)
+ vm_offset_t phys;
+ long size;
+{
+ pt_entry_t template;
+ pt_entry_t *pte;
+
+ pte = pmap_pte(kernel_pmap, virtual_avail);
+ if (pte == PT_ENTRY_NULL) halt(); /* extreme screwup */
+
+ pte_ktemplate(template,phys,VM_PROT_READ|VM_PROT_WRITE);
+
+ phys = virtual_avail;
+ virtual_avail += round_page(size);
+
+ while (size > 0) {
+ *pte++ = template;
+ pte_increment_pa(template);
+ size -= ALPHA_PGBYTES;
+ }
+ tbia();
+
+ return phys; /* misnomer */
+}
+
+unsigned int pmap_free_pages()
+{
+ return atop(avail_end - avail_start);
+}
+
+vm_offset_t pmap_steal_memory(size)
+ vm_size_t size;
+{
+ vm_offset_t addr;
+
+ /*
+ * We round the size to a long integer multiple.
+ */
+
+ size = roundup(size,sizeof(integer_t));
+ addr = phystokv(avail_start);
+ avail_start += size;
+ return addr;
+}
+
+/*
+ * Allocate permanent data structures in the k0seg.
+ */
+void pmap_startup(startp, endp)
+ vm_offset_t *startp, *endp;
+{
+ register long npages;
+ vm_offset_t addr;
+ register vm_size_t size;
+ int i;
+ vm_page_t pages;
+
+ /*
+ * Allocate memory for the pv_head_table and its lock bits,
+ * the modify bit array, and the vm_page structures.
+ */
+
+ npages = ((BYTE_SIZE * (avail_end - avail_start)) /
+ (BYTE_SIZE * (PAGE_SIZE + sizeof *pages +
+ sizeof *pv_head_table) + 2));
+
+ size = npages * sizeof *pages;
+ pages = (vm_page_t) pmap_steal_memory(size);
+
+ size = npages * sizeof *pv_head_table;
+ pv_head_table = (pv_entry_t) pmap_steal_memory(size);
+ bzero((char *) pv_head_table, size);
+
+ size = pv_lock_table_size(npages);
+ pv_lock_table = (char *) pmap_steal_memory(size);
+ bzero((char *) pv_lock_table, size);
+
+	size = npages;			/* one byte per page, see above */
+ pmap_phys_attributes = (char *) pmap_steal_memory(size);
+ bzero((char *) pmap_phys_attributes, size);
+
+ avail_start = round_page(avail_start);
+
+ if (npages > pmap_free_pages())
+ panic("pmap_startup");
+
+ for (i = 0; i < npages; i++) {
+ vm_page_init(&pages[i], avail_start + ptoa(i));
+ vm_page_release(&pages[i]);
+ }
+
+ *startp = virtual_avail;
+ *endp = virtual_end;
+}
+
+/*
+ * Initialize the pmap module.
+ * Called by vm_init, to initialize any structures that the pmap
+ * system needs to map virtual memory.
+ */
+void pmap_init()
+{
+ vm_size_t s;
+ int i;
+
+ /*
+ * Create the zone of physical maps,
+ * and of the physical-to-virtual entries.
+ */
+ s = (vm_size_t) sizeof(struct pmap);
+ pmap_zone = zinit(s, 400*s, 4096, FALSE, "pmap"); /* XXX */
+ s = (vm_size_t) sizeof(struct pv_entry);
+ pv_list_zone = zinit(s, 10000*s, 4096, FALSE, "pv_list"); /* XXX */
+
+#if NCPUS > 1
+ /*
+ * Set up the pmap request lists
+ */
+ for (i = 0; i < NCPUS; i++) {
+ pmap_update_list_t up = &cpu_update_list[i];
+
+ simple_lock_init(&up->lock);
+ up->count = 0;
+ }
+
+ alpha_set_scb_entry( SCB_INTERPROC, pmap_update_interrupt);
+
+#endif /* NCPUS > 1 */
+
+ /*
+ * Only now, when all of the data structures are allocated,
+ * can we set vm_first_phys and vm_last_phys. If we set them
+ * too soon, the kmem_alloc_wired above will try to use these
+ * data structures and blow up.
+ */
+
+ vm_first_phys = avail_start;
+ vm_last_phys = avail_end;
+ pmap_initialized = TRUE;
+}
+
+#define pmap_valid_page(x) ((avail_start <= x) && (x < avail_end))
+#define valid_page(x) (pmap_initialized && pmap_valid_page(x))
+
+/*
+ * Routine: pmap_page_table_page_alloc
+ *
+ * Allocates a new physical page to be used as a page-table page.
+ *
+ * Must be called with the pmap system and the pmap unlocked,
+ * since these must be unlocked to use vm_page_grab.
+ */
+vm_offset_t
+pmap_page_table_page_alloc()
+{
+ register vm_page_t m;
+ register vm_offset_t pa;
+
+ check_simple_locks();
+
+ /*
+ * We cannot allocate the pmap_object in pmap_init,
+ * because it is called before the zone package is up.
+ * Allocate it now if it is missing.
+ */
+ if (pmap_object == VM_OBJECT_NULL)
+ pmap_object = vm_object_allocate(mem_size);
+
+ /*
+ * Allocate a VM page
+ */
+ while ((m = vm_page_grab()) == VM_PAGE_NULL)
+ VM_PAGE_WAIT((void (*)()) 0);
+
+ /*
+ * Map the page to its physical address so that it
+ * can be found later.
+ */
+ pa = m->phys_addr;
+ vm_object_lock(pmap_object);
+ vm_page_insert(m, pmap_object, pa);
+ vm_page_lock_queues();
+ vm_page_wire(m);
+ inuse_ptepages_count++;
+ vm_page_unlock_queues();
+ vm_object_unlock(pmap_object);
+
+ /*
+ * Zero the page.
+ */
+ bzero(phystokv(pa), PAGE_SIZE);
+
+ return pa;
+}
+
+/*
+ * Deallocate a page-table page.
+ * The page-table page must have all mappings removed,
+ * and be removed from its page directory.
+ */
+void
+pmap_page_table_page_dealloc(pa)
+ vm_offset_t pa;
+{
+ vm_page_t m;
+
+ vm_object_lock(pmap_object);
+ m = vm_page_lookup(pmap_object, pa);
+ if (m == VM_PAGE_NULL)
+ panic("pmap_page_table_page_dealloc: page %#X not in object", pa);
+ vm_page_lock_queues();
+ vm_page_free(m);
+ inuse_ptepages_count--;
+ vm_page_unlock_queues();
+ vm_object_unlock(pmap_object);
+}
+
+/*
+ * Create and return a physical map.
+ *
+ * If the size specified for the map
+ * is zero, the map is an actual physical
+ * map, and may be referenced by the
+ * hardware.
+ *
+ * If the size specified is non-zero,
+ * the map will be used in software only, and
+ * is bounded by that size.
+ */
+pmap_t pmap_create(size)
+ vm_size_t size;
+{
+ register pmap_t p;
+ register pmap_statistics_t stats;
+
+ /*
+ * A software use-only map doesn't even need a map.
+ */
+
+ if (size != 0) {
+ return(PMAP_NULL);
+ }
+
+/*
+ * Allocate a pmap struct from the pmap_zone. Then allocate
+ *	the page directory with kmem_alloc_wired().
+ */
+
+ p = (pmap_t) zalloc(pmap_zone);
+ if (p == PMAP_NULL)
+ panic("pmap_create");
+
+ if (kmem_alloc_wired(kernel_map,
+ (vm_offset_t *)&p->dirbase, ALPHA_PGBYTES)
+ != KERN_SUCCESS)
+ panic("pmap_create");
+
+ aligned_block_copy(root_kpdes, p->dirbase, ALPHA_PGBYTES);
+ p->ref_count = 1;
+ p->pid = -1;
+
+ simple_lock_init(&p->lock);
+ p->cpus_using = 0;
+ p->hacking = 0;
+
+ /*
+ * Initialize statistics.
+ */
+
+ stats = &p->stats;
+ stats->resident_count = 0;
+ stats->wired_count = 0;
+
+if (pmap_debug) db_printf("pmap_create(%x->%x)\n", p, p->dirbase);
+ return(p);
+}
+
+/*
+ * Retire the given physical map from service.
+ * Should only be called if the map contains
+ * no valid mappings.
+ */
+
+void pmap_destroy(p)
+ register pmap_t p;
+{
+ register pt_entry_t *pdep, *ptep, *eptep;
+ register vm_offset_t pa;
+ register int c;
+ register spl_t s;
+
+ if (p == PMAP_NULL)
+ return;
+
+ SPLVM(s);
+ simple_lock(&p->lock);
+ c = --p->ref_count;
+ simple_unlock(&p->lock);
+ SPLX(s);
+
+ if (c != 0) {
+ return; /* still in use */
+ }
+
+if (pmap_debug) db_printf("pmap_destroy(%x->%x)\n", p, p->dirbase);
+ /*
+ * Free the memory maps, then the
+ * pmap structure.
+ */
+ for (pdep = p->dirbase;
+ pdep < pmap_pde(p,VM_MIN_KERNEL_ADDRESS);
+ pdep += ptes_per_vm_page) {
+ if (*pdep & ALPHA_PTE_VALID) {
+ pa = pte_to_pa(*pdep);
+
+ ptep = (pt_entry_t *)phystokv(pa);
+ eptep = ptep + NPTES;
+ for (; ptep < eptep; ptep += ptes_per_vm_page ) {
+ if (*ptep & ALPHA_PTE_VALID)
+ pmap_page_table_page_dealloc(pte_to_pa(*ptep));
+ }
+ pmap_page_table_page_dealloc(pa);
+ }
+ }
+ pmap_destroy_tlbpid(p->pid, FALSE);
+ kmem_free(kernel_map, p->dirbase, ALPHA_PGBYTES);
+ zfree(pmap_zone, (vm_offset_t) p);
+}
+
+/*
+ * Add a reference to the specified pmap.
+ */
+
+void pmap_reference(p)
+ register pmap_t p;
+{
+ spl_t s;
+ if (p != PMAP_NULL) {
+ SPLVM(s);
+ simple_lock(&p->lock);
+ p->ref_count++;
+ simple_unlock(&p->lock);
+ SPLX(s);
+ }
+}
+
+/*
+ * Remove a range of hardware page-table entries.
+ * The entries given are the first (inclusive)
+ * and last (exclusive) entries for the VM pages.
+ * The virtual address is the va for the first pte.
+ *
+ * The pmap must be locked.
+ * If the pmap is not the kernel pmap, the range must lie
+ * entirely within one pte-page. This is NOT checked.
+ * Assumes that the pte-page exists.
+ */
+
+/* static */
+void pmap_remove_range(pmap, va, spte, epte)
+ pmap_t pmap;
+ vm_offset_t va;
+ pt_entry_t *spte;
+ pt_entry_t *epte;
+{
+ register pt_entry_t *cpte;
+ int num_removed, num_unwired;
+ int pai;
+ vm_offset_t pa;
+
+ num_removed = 0;
+ num_unwired = 0;
+
+ for (cpte = spte; cpte < epte;
+ cpte += ptes_per_vm_page, va += PAGE_SIZE) {
+
+ if (*cpte == 0)
+ continue;
+ pa = pte_to_pa(*cpte);
+
+ num_removed++;
+ if (*cpte & ALPHA_PTE_WIRED)
+ num_unwired++;
+
+ if (!valid_page(pa)) {
+
+ /*
+ * Outside range of managed physical memory.
+ * Just remove the mappings.
+ */
+ register int i = ptes_per_vm_page;
+ register pt_entry_t *lpte = cpte;
+ do {
+ *lpte = 0;
+ lpte++;
+ } while (--i > 0);
+ continue;
+ }
+
+ pai = pa_index(pa);
+ LOCK_PVH(pai);
+
+ /*
+ * Get the modify and reference bits.
+ */
+ {
+ register int i;
+ register pt_entry_t *lpte;
+
+ i = ptes_per_vm_page;
+ lpte = cpte;
+ do {
+ pmap_phys_attributes[pai] |= pte_get_attributes(lpte);
+ *lpte = 0;
+ lpte++;
+ } while (--i > 0);
+ }
+
+ /*
+ * Remove the mapping from the pvlist for
+ * this physical page.
+ */
+ {
+ register pv_entry_t pv_h, prev, cur;
+
+ pv_h = pai_to_pvh(pai);
+ if (pv_h->pmap == PMAP_NULL) {
+ panic("pmap_remove: null pv_list!");
+ }
+ if (pv_h->va == va && pv_h->pmap == pmap) {
+ /*
+ * Header is the pv_entry. Copy the next one
+ * to header and free the next one (we cannot
+ * free the header)
+ */
+ cur = pv_h->next;
+ if (cur != PV_ENTRY_NULL) {
+ *pv_h = *cur;
+ PV_FREE(cur);
+ }
+ else {
+ pv_h->pmap = PMAP_NULL;
+ }
+ }
+ else {
+ cur = pv_h;
+ do {
+ prev = cur;
+ if ((cur = prev->next) == PV_ENTRY_NULL) {
+				panic("pmap_remove: mapping not in pv_list!");
+ }
+ } while (cur->va != va || cur->pmap != pmap);
+ prev->next = cur->next;
+ PV_FREE(cur);
+ }
+ UNLOCK_PVH(pai);
+ }
+ }
+
+ /*
+ * Update the counts
+ */
+ pmap->stats.resident_count -= num_removed;
+ pmap->stats.wired_count -= num_unwired;
+}
+
+/*
+ * One level up, iterate an operation on the
+ * virtual range va..eva, mapped by the 1st
+ * level pte spte.
+ */
+
+/* static */
+void pmap_iterate_lev2(pmap, s, e, spte, operation)
+ pmap_t pmap;
+ vm_offset_t s, e;
+ pt_entry_t *spte;
+ void (*operation)();
+{
+ vm_offset_t l;
+ pt_entry_t *epte;
+ pt_entry_t *cpte;
+
+if (pmap_debug > 1) db_printf("iterate2(%x,%x,%x)", s, e, spte);
+ while (s < e) {
+ /* at most 1 << 23 virtuals per iteration */
+ l = roundup(s+1,PDE2_MAPPED_SIZE);
+ if (l > e)
+ l = e;
+ if (*spte & ALPHA_PTE_VALID) {
+ register int n;
+ cpte = (pt_entry_t *) ptetokv(*spte);
+ n = pte3num(l);
+			if (n == 0) n = SEG_MASK + 1;	/* l == next segment up */
+ epte = &cpte[n];
+ cpte = &cpte[pte3num(s)];
+if (epte < cpte) gimmeabreak();
+if (pmap_debug > 1) db_printf(" [%x %x, %x %x]", s, l, cpte, epte);
+ operation(pmap, s, cpte, epte);
+ }
+ s = l;
+ spte++;
+ }
+if (pmap_debug > 1) db_printf("\n");
+}
+
+void
+pmap_make_readonly(pmap, va, spte, epte)
+ pmap_t pmap;
+ vm_offset_t va;
+ pt_entry_t *spte;
+ pt_entry_t *epte;
+{
+ while (spte < epte) {
+ if (*spte & ALPHA_PTE_VALID)
+ *spte &= ~ALPHA_PTE_WRITE;
+ spte++;
+ }
+}
+
+/*
+ * Remove the given range of addresses
+ * from the specified map.
+ *
+ * It is assumed that the start and end are properly
+ * rounded to the hardware page size.
+ */
+vm_offset_t pmap_suspect_vs, pmap_suspect_ve;
+
+
+void pmap_remove(map, s, e)
+ pmap_t map;
+ vm_offset_t s, e;
+{
+ spl_t spl;
+ register pt_entry_t *pde;
+ register pt_entry_t *spte;
+ vm_offset_t l;
+
+ if (map == PMAP_NULL)
+ return;
+
+if (pmap_debug || ((s > pmap_suspect_vs) && (s < pmap_suspect_ve)))
+db_printf("[%d]pmap_remove(%x,%x,%x)\n", cpu_number(), map, s, e);
+ PMAP_READ_LOCK(map, spl);
+
+ /*
+ * Invalidate the translation buffer first
+ */
+ PMAP_UPDATE_TLBS(map, s, e);
+
+ pde = pmap_pde(map, s);
+ while (s < e) {
+ /* at most (1 << 33) virtuals per iteration */
+ l = roundup(s+1, PDE_MAPPED_SIZE);
+ if (l > e)
+ l = e;
+ if (*pde & ALPHA_PTE_VALID) {
+ spte = (pt_entry_t *)ptetokv(*pde);
+ spte = &spte[pte2num(s)];
+ pmap_iterate_lev2(map, s, l, spte, pmap_remove_range);
+ }
+ s = l;
+ pde++;
+ }
+
+ PMAP_READ_UNLOCK(map, spl);
+}
+
+/*
+ * Routine: pmap_page_protect
+ *
+ * Function:
+ * Lower the permission for all mappings to a given
+ * page.
+ */
+vm_offset_t pmap_suspect_phys;
+
+void pmap_page_protect(phys, prot)
+ vm_offset_t phys;
+ vm_prot_t prot;
+{
+ pv_entry_t pv_h, prev;
+ register pv_entry_t pv_e;
+ register pt_entry_t *pte;
+ int pai;
+ register pmap_t pmap;
+ spl_t spl;
+ boolean_t remove;
+
+if (pmap_debug || (phys == pmap_suspect_phys)) db_printf("pmap_page_protect(%x,%x)\n", phys, prot);
+
+ assert(phys != vm_page_fictitious_addr);
+ if (!valid_page(phys)) {
+ /*
+ * Not a managed page.
+ */
+ return;
+ }
+
+ /*
+ * Determine the new protection.
+ */
+ switch (prot) {
+ case VM_PROT_READ:
+ case VM_PROT_READ|VM_PROT_EXECUTE:
+ remove = FALSE;
+ break;
+ case VM_PROT_ALL:
+ return; /* nothing to do */
+ default:
+ remove = TRUE;
+ break;
+ }
+
+ /*
+ * Lock the pmap system first, since we will be changing
+ * several pmaps.
+ */
+
+ PMAP_WRITE_LOCK(spl);
+
+ pai = pa_index(phys);
+ pv_h = pai_to_pvh(pai);
+
+ /*
+ * Walk down PV list, changing or removing all mappings.
+ * We do not have to lock the pv_list because we have
+ * the entire pmap system locked.
+ */
+ if (pv_h->pmap != PMAP_NULL) {
+
+ prev = pv_e = pv_h;
+ do {
+ pmap = pv_e->pmap;
+ /*
+ * Lock the pmap to block pmap_extract and similar routines.
+ */
+ simple_lock(&pmap->lock);
+
+ {
+ register vm_offset_t va;
+
+ va = pv_e->va;
+ pte = pmap_pte(pmap, va);
+
+ /*
+ * Consistency checks.
+ */
+ /* assert(*pte & ALPHA_PTE_VALID); XXX */
+ /* assert(pte_to_phys(*pte) == phys); */
+
+ /*
+ * Invalidate TLBs for all CPUs using this mapping.
+ */
+ PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
+ }
+
+ /*
+ * Remove the mapping if new protection is NONE
+ * or if write-protecting a kernel mapping.
+ */
+ if (remove || pmap == kernel_pmap) {
+ /*
+ * Remove the mapping, collecting any modify bits.
+ */
+ if (*pte & ALPHA_PTE_WIRED)
+				panic("pmap_page_protect: removing a wired page");
+
+ {
+ register int i = ptes_per_vm_page;
+
+ do {
+ pmap_phys_attributes[pai] |= pte_get_attributes(pte);
+ *pte++ = 0;
+ } while (--i > 0);
+ }
+
+ pmap->stats.resident_count--;
+
+ /*
+ * Remove the pv_entry.
+ */
+ if (pv_e == pv_h) {
+ /*
+ * Fix up head later.
+ */
+ pv_h->pmap = PMAP_NULL;
+ }
+ else {
+ /*
+ * Delete this entry.
+ */
+ prev->next = pv_e->next;
+ PV_FREE(pv_e);
+ }
+ }
+ else {
+ /*
+ * Write-protect.
+ */
+ register int i = ptes_per_vm_page;
+
+ do {
+ *pte &= ~ALPHA_PTE_WRITE;
+ pte++;
+ } while (--i > 0);
+
+ /*
+ * Advance prev.
+ */
+ prev = pv_e;
+ }
+
+ simple_unlock(&pmap->lock);
+
+ } while ((pv_e = prev->next) != PV_ENTRY_NULL);
+
+ /*
+ * If pv_head mapping was removed, fix it up.
+ */
+ if (pv_h->pmap == PMAP_NULL) {
+ pv_e = pv_h->next;
+ if (pv_e != PV_ENTRY_NULL) {
+ *pv_h = *pv_e;
+ PV_FREE(pv_e);
+ }
+ }
+ }
+
+ PMAP_WRITE_UNLOCK(spl);
+}
+
+/*
+ * Set the physical protection on the
+ * specified range of this map as requested.
+ * Will not increase permissions.
+ */
+void pmap_protect(map, s, e, prot)
+ pmap_t map;
+ vm_offset_t s, e;
+ vm_prot_t prot;
+{
+ register pt_entry_t *pde;
+ register pt_entry_t *spte, *epte;
+ vm_offset_t l;
+ spl_t spl;
+
+ if (map == PMAP_NULL)
+ return;
+
+if (pmap_debug || ((s > pmap_suspect_vs) && (s < pmap_suspect_ve)))
+db_printf("[%d]pmap_protect(%x,%x,%x,%x)\n", cpu_number(), map, s, e, prot);
+ /*
+ * Determine the new protection.
+ */
+ switch (prot) {
+ case VM_PROT_READ|VM_PROT_EXECUTE:
+ alphacache_Iflush();
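+		/* FALLTHROUGH */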
+ case VM_PROT_READ:
+ break;
+ case VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE:
+ alphacache_Iflush();
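+		/* FALLTHROUGH */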
+ case VM_PROT_READ|VM_PROT_WRITE:
+ return; /* nothing to do */
+ default:
+ pmap_remove(map, s, e);
+ return;
+ }
+
+ SPLVM(spl);
+ simple_lock(&map->lock);
+
+ /*
+ * Invalidate the translation buffer first
+ */
+ PMAP_UPDATE_TLBS(map, s, e);
+
+ pde = pmap_pde(map, s);
+ while (s < e) {
+ /* at most (1 << 33) virtuals per iteration */
+ l = roundup(s+1, PDE_MAPPED_SIZE);
+ if (l > e)
+ l = e;
+ if (*pde & ALPHA_PTE_VALID) {
+ spte = (pt_entry_t *)ptetokv(*pde);
+ spte = &spte[pte2num(s)];
+ pmap_iterate_lev2(map, s, l, spte, pmap_make_readonly);
+ }
+ s = l;
+ pde++;
+ }
+
+ simple_unlock(&map->lock);
+ SPLX(spl);
+}
+
+/*
+ * Insert the given physical page (p) at
+ * the specified virtual address (v) in the
+ * target physical map with the protection requested.
+ *
+ * If specified, the page will be wired down, meaning
+ * that the related pte can not be reclaimed.
+ *
+ * NB: This is the only routine which MAY NOT lazy-evaluate
+ * or lose information. That is, this routine must actually
+ * insert this page into the given map NOW.
+ */
+void pmap_enter(pmap, v, pa, prot, wired)
+ register pmap_t pmap;
+ vm_offset_t v;
+ register vm_offset_t pa;
+ vm_prot_t prot;
+ boolean_t wired;
+{
+ register pt_entry_t *pte;
+ register pv_entry_t pv_h;
+ register int i, pai;
+ pv_entry_t pv_e;
+ pt_entry_t template;
+ spl_t spl;
+ vm_offset_t old_pa;
+
+ assert(pa != vm_page_fictitious_addr);
+if (pmap_debug || ((v > pmap_suspect_vs) && (v < pmap_suspect_ve)))
+db_printf("[%d]pmap_enter(%x(%d), %x, %x, %x, %x)\n", cpu_number(), pmap, pmap->pid, v, pa, prot, wired);
+ if (pmap == PMAP_NULL)
+ return;
+if (pmap->pid < 0) gimmeabreak();
+
+ /*
+ * Must allocate a new pvlist entry while we're unlocked;
+ * zalloc may cause pageout (which will lock the pmap system).
+ * If we determine we need a pvlist entry, we will unlock
+ * and allocate one. Then we will retry, throwing away
+ * the allocated entry later (if we no longer need it).
+ */
+ pv_e = PV_ENTRY_NULL;
+Retry:
+ PMAP_READ_LOCK(pmap, spl);
+
+ /*
+ * Expand pmap to include this pte. Assume that
+ * pmap is always expanded to include enough hardware
+ * pages to map one VM page.
+ */
+
+ while ((pte = pmap_pte(pmap, v)) == PT_ENTRY_NULL) {
+ /*
+ * Must unlock to expand the pmap.
+ */
+ PMAP_READ_UNLOCK(pmap, spl);
+
+ pmap_expand(pmap, v);
+
+ PMAP_READ_LOCK(pmap, spl);
+ }
+
+ /*
+ * Special case if the physical page is already mapped
+ * at this address.
+ */
+ old_pa = pte_to_pa(*pte);
+ if (*pte && old_pa == pa) {
+ /*
+ * May be changing its wired attribute or protection
+ */
+
+ if (wired && !(*pte & ALPHA_PTE_WIRED))
+ pmap->stats.wired_count++;
+ else if (!wired && (*pte & ALPHA_PTE_WIRED))
+ pmap->stats.wired_count--;
+
+ pte_template(pmap,template,pa,prot);
+ if (pmap == kernel_pmap)
+ template |= ALPHA_PTE_GLOBAL;
+ if (wired)
+ template |= ALPHA_PTE_WIRED;
+ PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE);
+ i = ptes_per_vm_page;
+ do {
+ template |= (*pte & ALPHA_PTE_MOD);
+ *pte = template;
+ pte++;
+ pte_increment_pa(template);
+ } while (--i > 0);
+ }
+ else {
+
+ /*
+ * Remove old mapping from the PV list if necessary.
+ */
+ if (*pte) {
+ /*
+ * Invalidate the translation buffer,
+ * then remove the mapping.
+ */
+ PMAP_UPDATE_TLBS(pmap, v, v + PAGE_SIZE);
+
+ /*
+ * Don't free the pte page if removing last
+ * mapping - we will immediately replace it.
+ */
+ pmap_remove_range(pmap, v, pte,
+ pte + ptes_per_vm_page);
+ }
+
+ if (valid_page(pa)) {
+
+ /*
+ * Enter the mapping in the PV list for this
+ * physical page.
+ */
+
+ pai = pa_index(pa);
+ LOCK_PVH(pai);
+ pv_h = pai_to_pvh(pai);
+
+ if (pv_h->pmap == PMAP_NULL) {
+ /*
+ * No mappings yet
+ */
+ pv_h->va = v;
+ pv_h->pmap = pmap;
+ pv_h->next = PV_ENTRY_NULL;
+ if (prot & VM_PROT_EXECUTE)
+ alphacache_Iflush();
+ }
+ else {
+#if DEBUG
+ {
+ /* check that this mapping is not already there */
+ pv_entry_t e = pv_h;
+ while (e != PV_ENTRY_NULL) {
+ if (e->pmap == pmap && e->va == v)
+ panic("pmap_enter: already in pv_list");
+ e = e->next;
+ }
+ }
+#endif /* DEBUG */
+
+ /*
+ * Add new pv_entry after header.
+ */
+ if (pv_e == PV_ENTRY_NULL) {
+ PV_ALLOC(pv_e);
+ if (pv_e == PV_ENTRY_NULL) {
+ UNLOCK_PVH(pai);
+ PMAP_READ_UNLOCK(pmap, spl);
+
+ /*
+ * Refill from zone.
+ */
+ pv_e = (pv_entry_t) zalloc(pv_list_zone);
+ goto Retry;
+ }
+ }
+ pv_e->va = v;
+ pv_e->pmap = pmap;
+ pv_e->next = pv_h->next;
+ pv_h->next = pv_e;
+ /*
+ * Remember that we used the pvlist entry.
+ */
+ pv_e = PV_ENTRY_NULL;
+ }
+ UNLOCK_PVH(pai);
+ }
+
+ /*
+ * And count the mapping.
+ */
+
+ pmap->stats.resident_count++;
+ if (wired)
+ pmap->stats.wired_count++;
+
+ /*
+ * Build a template to speed up entering -
+ * only the pfn changes.
+ */
+ pte_template(pmap,template,pa,prot);
+ if (pmap == kernel_pmap)
+ template |= ALPHA_PTE_GLOBAL;
+ if (wired)
+ template |= ALPHA_PTE_WIRED;
+ i = ptes_per_vm_page;
+ do {
+ *pte = template;
+ pte++;
+ pte_increment_pa(template);
+ } while (--i > 0);
+ }
+
+ if (pv_e != PV_ENTRY_NULL) {
+ PV_FREE(pv_e);
+ }
+
+ PMAP_READ_UNLOCK(pmap, spl);
+}
+
+/*
+ * Routine: pmap_change_wiring
+ * Function: Change the wiring attribute for a map/virtual-address
+ * pair.
+ * In/out conditions:
+ * The mapping must already exist in the pmap.
+ */
+void pmap_change_wiring(map, v, wired)
+ register pmap_t map;
+ vm_offset_t v;
+ boolean_t wired;
+{
+ register pt_entry_t *pte;
+ register int i;
+ spl_t spl;
+
+if (pmap_debug) db_printf("pmap_change_wiring(%x,%x,%x)\n", map, v, wired);
+ /*
+ * We must grab the pmap system lock because we may
+ * change a pte_page queue.
+ */
+ PMAP_READ_LOCK(map, spl);
+
+ if ((pte = pmap_pte(map, v)) == PT_ENTRY_NULL)
+ panic("pmap_change_wiring: pte missing");
+
+ if (wired && !(*pte & ALPHA_PTE_WIRED)) {
+ /*
+ * wiring down mapping
+ */
+ map->stats.wired_count++;
+ i = ptes_per_vm_page;
+ do {
+ *pte++ |= ALPHA_PTE_WIRED;
+ } while (--i > 0);
+ }
+ else if (!wired && (*pte & ALPHA_PTE_WIRED)) {
+ /*
+ * unwiring mapping
+ */
+ map->stats.wired_count--;
+ i = ptes_per_vm_page;
+ do {
+		    *pte++ &= ~ALPHA_PTE_WIRED;
+ } while (--i > 0);
+ }
+
+ PMAP_READ_UNLOCK(map, spl);
+}
+
+/*
+ * Routine: pmap_extract
+ * Function:
+ * Extract the physical page address associated
+ * with the given map/virtual_address pair.
+ */
+
+vm_offset_t pmap_extract(pmap, va)
+ register pmap_t pmap;
+ vm_offset_t va;
+{
+ register pt_entry_t *pte;
+ register vm_offset_t pa;
+ spl_t spl;
+
+if (pmap_debug) db_printf("[%d]pmap_extract(%x,%x)\n", cpu_number(), pmap, va);
+ /*
+ * Special translation for kernel addresses
+ * in K1 or K0 space (directly mapped to
+ * physical addresses).
+ */
+ if (ISA_K1SEG(va))
+ return K1SEG_TO_PHYS(va);
+ if (ISA_K0SEG(va))
+ return K0SEG_TO_PHYS(va);
+
+ SPLVM(spl);
+ simple_lock(&pmap->lock);
+ if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL)
+ pa = (vm_offset_t) 0;
+ else if (!(*pte & ALPHA_PTE_VALID))
+ pa = (vm_offset_t) 0;
+ else
+ pa = pte_to_pa(*pte) + (va & ALPHA_OFFMASK);
+ simple_unlock(&pmap->lock);
+
+ /*
+ * Beware: this puts back this thread in the cpus_active set
+ */
+ SPLX(spl);
+ return(pa);
+}
+
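+/*
+ * Lock-free variant of pmap_extract: takes no locks and leaves
+ * the spl alone, so it is the one that is safe to call at
+ * interrupt time (kvtophys() above relies on this; see HISTORY).
+ */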
+vm_offset_t pmap_resident_extract(pmap, va)
+ register pmap_t pmap;
+ vm_offset_t va;
+{
+ register pt_entry_t *pte;
+ register vm_offset_t pa;
+
+ /*
+ * Special translation for kernel addresses
+ * in K1 or K0 space (directly mapped to
+ * physical addresses).
+ */
+ if (ISA_K1SEG(va))
+ return K1SEG_TO_PHYS(va);
+ if (ISA_K0SEG(va))
+ return K0SEG_TO_PHYS(va);
+
+ if ((pte = pmap_pte(pmap, va)) == PT_ENTRY_NULL)
+ pa = (vm_offset_t) 0;
+ else if (!(*pte & ALPHA_PTE_VALID))
+ pa = (vm_offset_t) 0;
+ else
+ pa = pte_to_pa(*pte) + (va & ALPHA_OFFMASK);
+ return(pa);
+}
+
+/*
+ * Routine: pmap_expand
+ *
+ * Expands a pmap to be able to map the specified virtual address.
+ *
+ * Must be called with the pmap system and the pmap unlocked,
+ * since these must be unlocked to use vm_page_grab.
+ * Thus it must be called in a loop that checks whether the map
+ * has been expanded enough.
+ */
+pmap_expand(map, v)
+ register pmap_t map;
+ register vm_offset_t v;
+{
+ pt_entry_t *pdp;
+ register vm_page_t m;
+ register vm_offset_t pa;
+ register int i;
+ spl_t spl;
+
+ /* Would have to go through all maps to add this page */
+ if (map == kernel_pmap)
+ panic("pmap_expand");
+
+ /*
+ * Allocate a VM page for the level 2 page table entries,
+ * if not already there.
+ */
+ pdp = pmap_pde(map,v);
+ if ((*pdp & ALPHA_PTE_VALID) == 0) {
+
+ pt_entry_t *pte;
+
+ pa = pmap_page_table_page_alloc();
+
+ /*
+ * Re-lock the pmap and check that another thread has
+ * not already allocated the page-table page. If it
+ * has, discard the new page-table page (and try
+ * again to make sure).
+ */
+ PMAP_READ_LOCK(map, spl);
+
+ if (*pdp & ALPHA_PTE_VALID) {
+ /*
+ * Oops...
+ */
+ PMAP_READ_UNLOCK(map, spl);
+ pmap_page_table_page_dealloc(pa);
+ return;
+ }
+ /*
+ * Map the page.
+ */
+ i = ptes_per_vm_page;
+ pte = pdp;
+ do {
+ pte_ktemplate(*pte,pa,VM_PROT_READ|VM_PROT_WRITE);
+ pte++;
+ pa += ALPHA_PGBYTES;
+ } while (--i > 0);
+ PMAP_READ_UNLOCK(map, spl);
+ }
+
+ /*
+ * Allocate a level 3 page table.
+ */
+
+ pa = pmap_page_table_page_alloc();
+
+ /*
+ * Re-lock the pmap and check that another thread has
+ * not already allocated the page-table page. If it
+ * has, we are done.
+ */
+ PMAP_READ_LOCK(map, spl);
+
+ if (pmap_pte(map, v) != PT_ENTRY_NULL) {
+ PMAP_READ_UNLOCK(map, spl);
+ pmap_page_table_page_dealloc(pa);
+ return;
+ }
+
+ /*
+ * Set the page directory entry for this page table.
+ * If we have allocated more than one hardware page,
+ * set several page directory entries.
+ */
+ i = ptes_per_vm_page;
+ pdp = (pt_entry_t *)ptetokv(*pdp);
+ pdp = &pdp[pte2num(v)];
+ do {
+ pte_ktemplate(*pdp,pa,VM_PROT_READ|VM_PROT_WRITE);
+ pdp++;
+ pa += ALPHA_PGBYTES;
+ } while (--i > 0);
+ PMAP_READ_UNLOCK(map, spl);
+ return;
+}
+
+/*
+ * Copy the range specified by src_addr/len
+ * from the source map to the range dst_addr/len
+ * in the destination map.
+ *
+ * This routine is only advisory and need not do anything.
+ */
+#if 0
+void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
+ pmap_t dst_pmap;
+ pmap_t src_pmap;
+ vm_offset_t dst_addr;
+ vm_size_t len;
+ vm_offset_t src_addr;
+{
+#ifdef lint
+ dst_pmap++; src_pmap++; dst_addr++; len++; src_addr++;
+#endif /* lint */
+}
+#endif
+
+/*
+ * Routine: pmap_collect
+ * Function:
+ * Garbage collects the physical map system for
+ * pages which are no longer used.
+ * Success need not be guaranteed -- that is, there
+ * may well be pages which are not referenced, but
+ * others may be collected.
+ * Usage:
+ * Called by the pageout daemon when pages are scarce.
+ */
+void pmap_collect(p)
+ pmap_t p;
+{
+#if notyet
+
+ register pt_entry_t *pdp, *ptp;
+ pt_entry_t *eptp;
+ vm_offset_t pa;
+ spl_t spl;
+ int wired;
+
+ if (p == PMAP_NULL)
+ return;
+
+ if (p == kernel_pmap)
+ return;
+
+ /*
+ * Garbage collect map.
+ */
+ PMAP_READ_LOCK(p, spl);
+ PMAP_UPDATE_TLBS(p, VM_MIN_ADDRESS, VM_MAX_ADDRESS);
+ pmap_destroy_tlbpid(p->pid, FALSE);
+
+ for (pdp = p->dirbase;
+ pdp < pmap_pde(p,VM_MIN_KERNEL_ADDRESS);
+ pdp += ptes_per_vm_page)
+ {
+ if (*pdp & ALPHA_PTE_VALID) {
+
+ pa = pte_to_pa(*pdp);
+ ptp = (pt_entry_t *)phystokv(pa);
+ eptp = ptp + NPTES*ptes_per_vm_page;
+
+ /*
+ * If the pte page has any wired mappings, we cannot
+ * free it.
+ */
+ wired = 0;
+ {
+ register pt_entry_t *ptep;
+ for (ptep = ptp; ptep < eptp; ptep++) {
+ if (*ptep & ALPHA_PTE_WIRED) {
+ wired = 1;
+ break;
+ }
+ }
+ }
+ if (!wired) {
+ /*
+ * Remove the virtual addresses mapped by this pte page.
+ */
+			pmap_remove_range_2(p,	/* XXX not yet implemented */
+ pdetova(pdp - p->dirbase),
+ ptp,
+ eptp);
+
+ /*
+ * Invalidate the page directory pointer.
+ */
+ {
+ register int i = ptes_per_vm_page;
+ register pt_entry_t *pdep = pdp;
+ do {
+ *pdep++ = 0;
+ } while (--i > 0);
+ }
+
+ PMAP_READ_UNLOCK(p, spl);
+
+ /*
+ * And free the pte page itself.
+ */
+ {
+ register vm_page_t m;
+
+ vm_object_lock(pmap_object);
+ m = vm_page_lookup(pmap_object, pa);
+ if (m == VM_PAGE_NULL)
+ panic("pmap_collect: pte page not in object");
+ vm_page_lock_queues();
+ vm_page_free(m);
+ inuse_ptepages_count--;
+ vm_page_unlock_queues();
+ vm_object_unlock(pmap_object);
+ }
+
+ PMAP_READ_LOCK(p, spl);
+ }
+ }
+ }
+ PMAP_READ_UNLOCK(p, spl);
+ return;
+#endif
+}
+
+/*
+ * Routine: pmap_activate
+ * Function:
+ * Binds the given physical map to the given
+ * processor, and returns a hardware map description.
+ */
+#if 0
+void pmap_activate(my_pmap, th, my_cpu)
+ register pmap_t my_pmap;
+ thread_t th;
+ int my_cpu;
+{
+ PMAP_ACTIVATE(my_pmap, th, my_cpu);
+}
+#endif
+
+/*
+ * Routine: pmap_deactivate
+ * Function:
+ * Indicates that the given physical map is no longer
+ * in use on the specified processor. (This is a macro
+ * in pmap.h)
+ */
+#if 0
+void pmap_deactivate(pmap, th, which_cpu)
+ pmap_t pmap;
+ thread_t th;
+ int which_cpu;
+{
+#ifdef lint
+ pmap++; th++; which_cpu++;
+#endif
+ PMAP_DEACTIVATE(pmap, th, which_cpu);
+}
+#endif
+
+/*
+ * Routine: pmap_kernel
+ * Function:
+ * Returns the physical map handle for the kernel.
+ */
+#if 0
+pmap_t pmap_kernel()
+{
+ return (kernel_pmap);
+}
+#endif
+
+/*
+ * pmap_zero_page zeros the specified (machine independent) page.
+ * See machine/phys.c or machine/phys.s for implementation.
+ */
+#if 1
+pmap_zero_page(phys)
+ register vm_offset_t phys;
+{
+
+ assert(phys != vm_page_fictitious_addr);
+
+if (pmap_debug || (phys == pmap_suspect_phys)) db_printf("pmap_zero_page(%x)\n", phys);
+ bzero(phystokv(phys), PAGE_SIZE);
+}
+#endif
+
+/*
+ * pmap_copy_page copies the specified (machine independent) page.
+ * See machine/phys.c or machine/phys.s for implementation.
+ */
+#if 1 /* fornow */
+pmap_copy_page(src, dst)
+ vm_offset_t src, dst;
+{
+ assert(src != vm_page_fictitious_addr);
+ assert(dst != vm_page_fictitious_addr);
+
+if (pmap_debug || (src == pmap_suspect_phys) || (dst == pmap_suspect_phys)) db_printf("pmap_copy_page(%x,%x)\n", src, dst);
+ aligned_block_copy(phystokv(src), phystokv(dst), PAGE_SIZE);
+
+}
+#endif
+
+/*
+ * Routine: pmap_pageable
+ * Function:
+ * Make the specified pages (by pmap, offset)
+ * pageable (or not) as requested.
+ *
+ * A page which is not pageable may not take
+ * a fault; therefore, its page table entry
+ * must remain valid for the duration.
+ *
+ * This routine is merely advisory; pmap_enter
+ * will specify that these pages are to be wired
+ * down (or not) as appropriate.
+ */
+pmap_pageable(pmap, start, end, pageable)
+ pmap_t pmap;
+ vm_offset_t start;
+ vm_offset_t end;
+ boolean_t pageable;
+{
+#ifdef lint
+ pmap++; start++; end++; pageable++;
+#endif
+}
+
+/*
+ * Clear specified attribute bits.
+ */
+void
+phys_attribute_clear(phys, bits)
+ vm_offset_t phys;
+ int bits;
+{
+ pv_entry_t pv_h;
+ register pv_entry_t pv_e;
+ register pt_entry_t *pte;
+ int pai;
+ register pmap_t pmap;
+ spl_t spl;
+
+ assert(phys != vm_page_fictitious_addr);
+ if (!valid_page(phys)) {
+ /*
+ * Not a managed page.
+ */
+ return;
+ }
+
+ /*
+ * Lock the pmap system first, since we will be changing
+ * several pmaps.
+ */
+
+ PMAP_WRITE_LOCK(spl);
+
+ pai = pa_index(phys);
+ pv_h = pai_to_pvh(pai);
+
+ /*
+ * Walk down PV list, clearing all modify or reference bits.
+ * We do not have to lock the pv_list because we have
+ * the entire pmap system locked.
+ */
+ if (pv_h->pmap != PMAP_NULL) {
+ /*
+ * There are some mappings.
+ */
+ for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) {
+
+ pmap = pv_e->pmap;
+ /*
+ * Lock the pmap to block pmap_extract and similar routines.
+ */
+ simple_lock(&pmap->lock);
+
+ {
+ register vm_offset_t va;
+
+ va = pv_e->va;
+ pte = pmap_pte(pmap, va);
+
+#if 0
+ /*
+ * Consistency checks.
+ */
+ assert(*pte & ALPHA_PTE_VALID);
+ /* assert(pte_to_phys(*pte) == phys); */
+#endif
+
+ /*
+ * Invalidate TLBs for all CPUs using this mapping.
+ */
+ PMAP_UPDATE_TLBS(pmap, va, va + PAGE_SIZE);
+ }
+
+ /*
+ * Clear modify or reference bits.
+ */
+ {
+ register int i = ptes_per_vm_page;
+ do {
+ *pte &= ~bits;
+ } while (--i > 0);
+ }
+ simple_unlock(&pmap->lock);
+ }
+ }
+
+ pmap_phys_attributes[pai] &= ~ (bits >> 16);
+
+ PMAP_WRITE_UNLOCK(spl);
+}
+
+/*
+ * Check specified attribute bits.
+ */
+boolean_t
+phys_attribute_test(phys, bits)
+ vm_offset_t phys;
+ int bits;
+{
+ pv_entry_t pv_h;
+ register pv_entry_t pv_e;
+ register pt_entry_t *pte;
+ int pai;
+ register pmap_t pmap;
+ spl_t spl;
+
+ assert(phys != vm_page_fictitious_addr);
+ if (!valid_page(phys)) {
+ /*
+ * Not a managed page.
+ */
+ return (FALSE);
+ }
+
+ /*
+ * Lock the pmap system first, since we will be checking
+ * several pmaps.
+ */
+
+ PMAP_WRITE_LOCK(spl);
+
+ pai = pa_index(phys);
+ pv_h = pai_to_pvh(pai);
+
+ if (pmap_phys_attributes[pai] & (bits >> 16)) {
+ PMAP_WRITE_UNLOCK(spl);
+ return (TRUE);
+ }
+
+ /*
+ * Walk down PV list, checking all mappings.
+ * We do not have to lock the pv_list because we have
+ * the entire pmap system locked.
+ */
+ if (pv_h->pmap != PMAP_NULL) {
+ /*
+ * There are some mappings.
+ */
+ for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) {
+
+ pmap = pv_e->pmap;
+ /*
+ * Lock the pmap to block pmap_extract and similar routines.
+ */
+ simple_lock(&pmap->lock);
+
+ {
+ register vm_offset_t va;
+
+ va = pv_e->va;
+ pte = pmap_pte(pmap, va);
+
+#if 0
+ /*
+ * Consistency checks.
+ */
+ assert(*pte & ALPHA_PTE_VALID);
+ /* assert(pte_to_phys(*pte) == phys); */
+#endif
+ }
+
+ /*
+ * Check modify or reference bits.
+ */
+ {
+ register int i = ptes_per_vm_page;
+
+ do {
+ if (*pte & bits) {
+ simple_unlock(&pmap->lock);
+ PMAP_WRITE_UNLOCK(spl);
+ return (TRUE);
+ }
+ } while (--i > 0);
+ }
+ simple_unlock(&pmap->lock);
+ }
+ }
+ PMAP_WRITE_UNLOCK(spl);
+ return (FALSE);
+}
+
+/*
+ * Set specified attribute bits. <ugly>
+ */
+void
+phys_attribute_set(phys, bits)
+ vm_offset_t phys;
+ int bits;
+{
+ int pai;
+ spl_t spl;
+
+ assert(phys != vm_page_fictitious_addr);
+ if (!valid_page(phys)) {
+ /*
+ * Not a managed page.
+ */
+ return;
+ }
+
+ /*
+ * Lock the pmap system.
+ */
+
+ PMAP_WRITE_LOCK(spl);
+
+ pai = pa_index(phys);
+ pmap_phys_attributes[pai] |= (bits >> 16);
+
+ PMAP_WRITE_UNLOCK(spl);
+}
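+
+/*
+ * The three routines above keep one attribute byte per managed page in
+ * pmap_phys_attributes[]: callers pass the pte software bits (the
+ * ALPHA_PTE_SOFTWARE field, 0xffff0000) and the routines shift them
+ * down 16 bits to fit that byte.  A minimal sketch of the encoding
+ * (illustration only, not part of the MK83a sources):
+ */
+#if 0
+static void attribute_encoding_example(void)
+{
+	unsigned char attr = 0;
+
+	attr |= (ALPHA_PTE_MOD >> 16);	/* 0x00040000 >> 16 == 0x04 */
+	attr &= ~(ALPHA_PTE_REF >> 16);	/* 0x00020000 >> 16 == 0x02 */
+	assert(attr == 0x04);
+}
+#endif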
+
+/*
+ * Clear the modify bits on the specified physical page.
+ */
+
+void pmap_clear_modify(phys)
+ register vm_offset_t phys;
+{
+if (pmap_debug) db_printf("pmap_clear_mod(%x)\n", phys);
+ phys_attribute_clear(phys, ALPHA_PTE_MOD);
+}
+
+/*
+ * Set the modify bits on the specified physical page.
+ */
+
+void pmap_set_modify(phys)
+ register vm_offset_t phys;
+{
+if (pmap_debug) db_printf("pmap_set_mod(%x)\n", phys);
+ phys_attribute_set(phys, ALPHA_PTE_MOD);
+}
+
+/*
+ * pmap_is_modified:
+ *
+ * Return whether or not the specified physical page is modified
+ * by any physical maps.
+ */
+
+boolean_t pmap_is_modified(phys)
+ register vm_offset_t phys;
+{
+if (pmap_debug) db_printf("pmap_is_mod(%x)\n", phys);
+ return (phys_attribute_test(phys, ALPHA_PTE_MOD));
+}
+
+/*
+ * pmap_clear_reference:
+ *
+ * Clear the reference bit on the specified physical page.
+ */
+
+void pmap_clear_reference(phys)
+ vm_offset_t phys;
+{
+if (pmap_debug) db_printf("pmap_clear_ref(%x)\n", phys);
+ phys_attribute_clear(phys, ALPHA_PTE_REF);
+}
+
+/*
+ * pmap_is_referenced:
+ *
+ * Return whether or not the specified physical page is referenced
+ * by any physical maps.
+ */
+
+boolean_t pmap_is_referenced(phys)
+ vm_offset_t phys;
+{
+if (pmap_debug) db_printf("pmap_is_ref(%x)\n", phys);
+ return (phys_attribute_test(phys, ALPHA_PTE_REF));
+}
+
+#if NCPUS > 1
+/*
+ * TLB Coherence Code (TLB "shootdown" code)
+ *
+ * Threads that belong to the same task share the same address space and
+ * hence share a pmap.  However, they may run on distinct cpus and thus
+ * have distinct TLBs that cache page table entries.  In order to guarantee
+ * the TLBs are consistent, whenever a pmap is changed, all threads that
+ * are active in that pmap must have their TLB updated.  To keep track of
+ * this information, the set of cpus that are currently using a pmap is
+ * maintained within each pmap structure (cpus_using).  Pmap_activate() and
+ * pmap_deactivate() add and remove, respectively, a cpu from this set.
+ * Since the TLBs are not addressable over the bus, each processor must
+ * flush its own TLB; a processor that needs to invalidate another TLB
+ * needs to interrupt the processor that owns that TLB to signal the
+ * update.
+ *
+ * Whenever a pmap is updated, the lock on that pmap is locked, and all
+ * cpus using the pmap are signaled to invalidate.  All threads that need
+ * to activate a pmap must wait for the lock to clear to await any updates
+ * in progress before using the pmap.  They must ACQUIRE the lock to add
+ * their cpu to the cpus_using set.  An implicit assumption made
+ * throughout the TLB code is that all kernel code that runs at or higher
+ * than splvm blocks out update interrupts, and that such code does not
+ * touch pageable pages.
+ *
+ * A shootdown interrupt serves another function besides signaling a
+ * processor to invalidate.  The interrupt routine (pmap_update_interrupt)
+ * waits for both the pmap lock and the kernel pmap lock to clear,
+ * preventing user code from making implicit pmap updates while the
+ * sending processor is performing its update.  (This could happen via a
+ * user data write reference that turns on the modify bit in the page
+ * table.)  It must wait for any kernel updates that may have started
+ * concurrently with a user pmap update because the IPC code
+ * changes mappings.
+ * Spinning on the VALUES of the locks is sufficient (rather than
+ * having to acquire the locks) because any updates that occur subsequent
+ * to finding the lock unlocked will be signaled via another interrupt.
+ * (This assumes the interrupt is cleared before the low level interrupt code
+ * calls pmap_update_interrupt().)
+ *
+ * The signaling processor must wait for any implicit updates in progress
+ * to terminate before continuing with its update.  Thus it must wait for an
+ * acknowledgement of the interrupt from each processor for which such
+ * references could be made.  For maintaining this information, a set
+ * cpus_active is used.  A cpu is in this set if and only if it can
+ * use a pmap.  When pmap_update_interrupt() is entered, a cpu is removed from
+ * this set; when all such cpus are removed, it is safe to update.
+ *
+ * Before attempting to acquire the update lock on a pmap, a cpu (A) must
+ * be at least at the priority of the interprocessor interrupt
+ * (splip<=splvm).  Otherwise, A could grab a lock and be interrupted by a
+ * kernel update; it would spin forever in pmap_update_interrupt() trying
+ * to acquire the user pmap lock it had already acquired.  Furthermore A
+ * must remove itself from cpus_active.  Otherwise, another cpu holding
+ * the lock (B) could be in the process of sending an update signal to A,
+ * and thus be waiting for A to remove itself from cpus_active.  If A is
+ * spinning on the lock at interrupt priority, this will never happen and
+ * a deadlock will result.
+ */
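+
+/*
+ * The cpu sets above are plain bitmasks, one bit per cpu with cpu 0 in
+ * bit 0.  signal_cpus() below walks such a set with ffs(); a minimal
+ * free-standing sketch of that idiom (illustration only; assumes the
+ * usual 1-origin ffs()):
+ */
+#if 0
+static void for_each_cpu(int set, void (*fn)(int))
+{
+	int cpu;
+
+	while ((cpu = ffs(set)) != 0) {
+		cpu -= 1;		/* ffs() is 1-origin */
+		(*fn)(cpu);
+		set &= ~(1 << cpu);	/* done with this cpu */
+	}
+}
+#endif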
+
+/*
+ * Signal another CPU that it must flush its TLB
+ */
+void signal_cpus(use_list, pmap, start, end)
+ cpu_set use_list;
+ pmap_t pmap;
+ vm_offset_t start, end;
+{
+ register int which_cpu, j;
+ register pmap_update_list_t update_list_p;
+
+ while ((which_cpu = ffs(use_list)) != 0) {
+ which_cpu -= 1; /* convert to 0 origin */
+
+ update_list_p = &cpu_update_list[which_cpu];
+ simple_lock(&update_list_p->lock);
+
+ j = update_list_p->count;
+ if (j >= UPDATE_LIST_SIZE) {
+ /*
+ * list overflowed. Change last item to
+ * indicate overflow.
+ */
+ update_list_p->item[UPDATE_LIST_SIZE-1].pmap = kernel_pmap;
+ update_list_p->item[UPDATE_LIST_SIZE-1].start = VM_MIN_ADDRESS;
+ update_list_p->item[UPDATE_LIST_SIZE-1].end = VM_MAX_KERNEL_ADDRESS;
+ }
+ else {
+ update_list_p->item[j].pmap = pmap;
+ update_list_p->item[j].start = start;
+ update_list_p->item[j].end = end;
+ update_list_p->count = j+1;
+ }
+ cpu_update_needed[which_cpu] = TRUE;
+ simple_unlock(&update_list_p->lock);
+
+ if ((cpus_idle & (1 << which_cpu)) == 0)
+ interrupt_processor(which_cpu);
+ use_list &= ~(1 << which_cpu);
+ }
+}
+
+void process_pmap_updates(my_pmap)
+ register pmap_t my_pmap;
+{
+ register int my_cpu = cpu_number();
+ register pmap_update_list_t update_list_p;
+ register int j;
+ register pmap_t pmap;
+
+ update_list_p = &cpu_update_list[my_cpu];
+ simple_lock(&update_list_p->lock);
+
+ for (j = 0; j < update_list_p->count; j++) {
+ pmap = update_list_p->item[j].pmap;
+ if (pmap == my_pmap ||
+ pmap == kernel_pmap) {
+
+ INVALIDATE_TLB(update_list_p->item[j].start,
+ update_list_p->item[j].end);
+ }
+ }
+ update_list_p->count = 0;
+ cpu_update_needed[my_cpu] = FALSE;
+ simple_unlock(&update_list_p->lock);
+}
+
+#if MACH_KDB
+
+static boolean_t db_interp_int[NCPUS];
+int db_inside_pmap_update[NCPUS];
+int suicide_cpu;
+
+cpu_interrupt_to_db(i)
+ int i;
+{
+ db_interp_int[i] = TRUE;
+ interrupt_processor(i);
+}
+#endif
+
+/*
+ * Interrupt routine for TBIA requested from other processor.
+ */
+void pmap_update_interrupt()
+{
+ register int my_cpu;
+ register pmap_t my_pmap;
+ spl_t s;
+
+ my_cpu = cpu_number();
+
+ db_inside_pmap_update[my_cpu]++;
+#if MACH_KDB
+ if (db_interp_int[my_cpu]) {
+ db_interp_int[my_cpu] = FALSE;
+ remote_db_enter();
+ /* In case another processor modified text */
+ alphacache_Iflush();
+if (cpu_number() == suicide_cpu) halt();
+ goto out; /* uhmmm, maybe should do updates just in case */
+ }
+#endif
+ /*
+ * Exit now if we're idle. We'll pick up the update request
+ * when we go active, and we must not put ourselves back in
+ * the active set because we'll never process the interrupt
+ * while we're idle (thus hanging the system).
+ */
+ if (cpus_idle & (1 << my_cpu))
+ goto out;
+
+ if (current_thread() == THREAD_NULL)
+ my_pmap = kernel_pmap;
+ else {
+ my_pmap = current_pmap();
+ if (!pmap_in_use(my_pmap, my_cpu))
+ my_pmap = kernel_pmap;
+ }
+
+ /*
+ * Raise spl to splvm (above splip) to block out pmap_extract
+ * from IO code (which would put this cpu back in the active
+ * set).
+ */
+ s = splvm();
+
+ do {
+
+ /*
+ * Indicate that we're not using either user or kernel
+ * pmap.
+ */
+ i_bit_clear(my_cpu, &cpus_active);
+
+ /*
+ * Wait for any pmap updates in progress, on either user
+ * or kernel pmap.
+ */
+ while (*(volatile int *)&my_pmap->lock.lock_data ||
+ *(volatile int *)&kernel_pmap->lock.lock_data)
+ continue;
+
+ process_pmap_updates(my_pmap);
+
+ i_bit_set(my_cpu, &cpus_active);
+
+ } while (cpu_update_needed[my_cpu]);
+
+ splx(s);
+out:
+ db_inside_pmap_update[my_cpu]--;
+}
+#else /* NCPUS > 1 */
+/*
+ * Dummy routine to satisfy external reference.
+ */
+void pmap_update_interrupt()
+{
+ /* should never be called. */
+}
+#endif /* NCPUS > 1 */
+
+void
+set_ptbr(pmap_t map, pcb_t pcb, boolean_t switchit)
+{
+ /* optimize later */
+ vm_offset_t pa;
+
+ pa = pmap_resident_extract(kernel_pmap, map->dirbase);
+ if (pa == 0)
+ panic("set_ptbr");
+ pcb->mss.hw_pcb.ptbr = alpha_btop(pa);
+ if (switchit) {
+ pcb->mss.hw_pcb.asn = map->pid;
+ swpctxt(kvtophys((vm_offset_t) pcb), &(pcb)->mss.hw_pcb.ksp);
+ }
+}
+
+/***************************************************************************
+ *
+ * TLBPID Management
+ *
+ * This is basically a unique number generator, with the twist
+ * that numbers are in a given range (dynamically defined).
+ * All things considered, I did it right in the MIPS case.
+ */
+
+int pmap_max_asn = 63; /* Default value at boot. Should be 2^ */
+
+decl_simple_lock_data(static, tlbpid_lock)
+static struct pmap **pids_in_use;
+static int pmap_next_pid;
+
+pmap_tlbpid_init()
+{
+ simple_lock_init(&tlbpid_lock);
+
+#define MAX_PID_EVER 1023 /* change if necessary, this is one page */
+ pids_in_use = (struct pmap **)
+ pmap_steal_memory( (MAX_PID_EVER+1) * sizeof(struct pmap *));
+ bzero(pids_in_use, (MAX_PID_EVER+1) * sizeof(struct pmap *));
+#undef MAX_PID_EVER
+
+ pmap_next_pid = 1;
+}
+
+/*
+ * Axioms:
+ * - pmap_next_pid always points to a free one, unless the table is full;
+ * in that case it points to a likely candidate for recycling.
+ * - pmap.pid prevents duplicates: if -1 there is no
+ * pid for it, otherwise there is one and only one entry at that index.
+ *
+ * pmap_assign_tlbpid provides a tlbpid for the given pmap, creating
+ * a new one if necessary
+ * pmap_destroy_tlbpid returns a tlbpid to the pool of available ones
+ */
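+
+/*
+ * A minimal sketch of the second axiom as a runtime check (illustrative
+ * only; "tlbpid_invariant" is a name invented for this sketch):
+ */
+#if 0
+static void tlbpid_invariant(struct pmap *map)
+{
+	if (map->pid >= 0)
+		assert(pids_in_use[map->pid] == map);	/* one and only one */
+	/* map->pid == -1 means the pmap holds no tlbpid at all */
+}
+#endif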
+
+pmap_assign_tlbpid(map)
+ struct pmap *map;
+{
+ register int pid, next_pid;
+
+ if (map->pid < 0) {
+
+ simple_lock(&tlbpid_lock);
+
+ next_pid = pmap_next_pid;
+ if (pids_in_use[next_pid]) {
+ /* are we _really_ sure it's full ? */
+ for (pid = 1; pid < pmap_max_asn; pid++)
+ if (pids_in_use[pid] == PMAP_NULL) {
+ /* aha! */
+ next_pid = pid;
+ goto got_a_free_one;
+ }
+ /* Table full */
+ while (pids_in_use[next_pid]->cpus_using) {
+ if (++next_pid == pmap_max_asn)
+ next_pid = 1;
+ }
+ pmap_destroy_tlbpid(next_pid, TRUE);
+ }
+got_a_free_one:
+ pids_in_use[next_pid] = map;
+ map->pid = next_pid;
+ if (++next_pid == pmap_max_asn)
+ next_pid = 1;
+ pmap_next_pid = next_pid;
+
+ simple_unlock(&tlbpid_lock);
+ }
+}
+
+pmap_destroy_tlbpid(pid, locked)
+ int pid;
+ boolean_t locked;
+{
+ struct pmap *map;
+
+ if (pid < 0) /* no longer in use */
+ return;
+
+ if (!locked) simple_lock(&tlbpid_lock);
+
+ /*
+ * Make the pid available, and the map unassigned.
+ */
+ map = pids_in_use[pid];
+ pids_in_use[pid] = PMAP_NULL;
+ map->pid = -1;
+
+ if (!locked) simple_unlock(&tlbpid_lock);
+}
+
+#if 1 /* DEBUG */
+
+print_pv_list()
+{
+ pv_entry_t p;
+ vm_offset_t phys;
+
+ db_printf("phys pages %x < p < %x\n", vm_first_phys, vm_last_phys);
+ for (phys = vm_first_phys; phys < vm_last_phys; phys += PAGE_SIZE) {
+ p = pai_to_pvh(pa_index(phys));
+ if (p->pmap != PMAP_NULL) {
+ db_printf("%x: %x %x\n", phys, p->pmap, p->va);
+			while ((p = p->next) != PV_ENTRY_NULL)
+ db_printf("\t\t%x %x\n", p->pmap, p->va);
+ }
+ }
+}
+
+#endif
diff --git a/alpha/alpha/pmap.h b/alpha/alpha/pmap.h
new file mode 100644
index 00000000..489bdbf1
--- /dev/null
+++ b/alpha/alpha/pmap.h
@@ -0,0 +1,403 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: pmap.h,v $
+ * Revision 2.3 93/01/19 08:59:45 danner
+ * Do not allocate cpusets as commons, to avoid
+ * cacheline conflicts.
+ * [93/01/15 af]
+ *
+ * Revision 2.2 93/01/14 17:13:51 danner
+ * Created, from dbg's i386 pmap module.
+ * [92/06/15 af]
+ *
+ *
+ */
+
+/*
+ * File: pmap.h
+ *
+ * Author: David Golub (mods for Alpha by Alessandro Forin)
+ * Date: 1988 ca.
+ *
+ * Machine-dependent structures for the physical map module.
+ */
+
+#ifndef _PMAP_MACHINE_
+#define _PMAP_MACHINE_ 1
+
+#ifndef ASSEMBLER
+
+#include <kern/zalloc.h>
+#include <kern/lock.h>
+#include <mach/machine/vm_param.h>
+#include <mach/vm_statistics.h>
+#include <mach/kern_return.h>
+
+/*
+ * Alpha Page Table Entry
+ */
+
+typedef unsigned long pt_entry_t;
+#define PT_ENTRY_NULL ((pt_entry_t *) 0)
+
+#endif /* ASSEMBLER */
+
+#define ALPHA_OFFMASK (ALPHA_PGBYTES-1) /* offset within page */
+
+#define SEG_MASK ((ALPHA_PGBYTES / 8)-1) /* masks for segments */
+#define SEG3_SHIFT (ALPHA_PGSHIFT) /* shifts for segments */
+#define SEG2_SHIFT (SEG3_SHIFT+(ALPHA_PGSHIFT-3))
+#define SEG1_SHIFT (SEG2_SHIFT+(ALPHA_PGSHIFT-3))
+
+/*
+ * Convert address offset to page descriptor index
+ */
+#define pdenum(a) (((a) >> SEG1_SHIFT) & SEG_MASK)
+
+/*
+ * Convert page descriptor index to user virtual address
+ */
+#define pdetova(a) ((vm_offset_t)(a) << SEG1_SHIFT)
+#define pde2tova(a) ((vm_offset_t)(a) << SEG2_SHIFT)
+#define pde3tova(a) ((vm_offset_t)(a) << SEG3_SHIFT)
+
+/*
+ * Convert address offset to second level page table index
+ */
+#define pte2num(a) (((a) >> SEG2_SHIFT) & SEG_MASK)
+
+/*
+ * Convert address offset to third level page table index
+ */
+#define pte3num(a) (((a) >> SEG3_SHIFT) & SEG_MASK)
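+
+/*
+ * With the 8K pages of vm_param.h (ALPHA_PGSHIFT == 13) each level
+ * resolves 10 bits: SEG3_SHIFT == 13, SEG2_SHIFT == 23, SEG1_SHIFT == 33
+ * and SEG_MASK == 1023.  A minimal sketch of the resulting three-level
+ * split (illustration only):
+ */
+#if 0
+static void va_split_example(vm_offset_t va)
+{
+	unsigned long l1  = (va >> 33) & 1023;	/* pdenum(va)  */
+	unsigned long l2  = (va >> 23) & 1023;	/* pte2num(va) */
+	unsigned long l3  = (va >> 13) & 1023;	/* pte3num(va) */
+	unsigned long off = va & ALPHA_OFFMASK;	/* byte within page */
+	/* index dirbase with l1, the lev2 table with l2, the lev3 with l3 */
+}
+#endif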
+
+#define NPTES (alpha_ptob(1)/sizeof(pt_entry_t))
+#define NPDES (alpha_ptob(1)/sizeof(pt_entry_t))
+
+/*
+ * Hardware pte bit definitions (to be used directly on the ptes
+ * without using the bit fields).
+ */
+
+#define ALPHA_PTE_VALID 0x1
+
+#define ALPHA_PTE_FAULT_ON_x 0xe
+
+#define ALPHA_PTE_GLOBAL 0x10
+#define ALPHA_PTE_GRANULARITY 0x60
+
+#define ALPHA_PTE_PROT 0xff00
+#define ALPHA_PTE_PROTOFF 8
+#define ALPHA_PTE_KW 0x10
+#define ALPHA_PTE_UW 0x80
+#define ALPHA_PTE_KR 0x01
+#define ALPHA_PTE_UR 0x08
+
+#define ALPHA_PTE_WRITE 0x00009000
+
+#define ALPHA_PTE_SOFTWARE 0xffff0000
+#define ALPHA_PTE_WIRED 0x00010000
+#define ALPHA_PTE_REF 0x00020000
+#define ALPHA_PTE_MOD 0x00040000
+
+#define ALPHA_PTE_PFN 0xffffffff00000000
+
+#define pa_to_pte(a) (alpha_btop(a) << 32)
+#define pte_to_pa(p) (alpha_ptob( (p) >> 32 ))
+#define pte_increment_pa(p) ((p) += pa_to_pte(ALPHA_PGBYTES))
+
+/*
+ * Convert page table entry to kernel virtual address
+ */
+#define ptetokv(a) (phystokv(pte_to_pa(a)))
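+
+/*
+ * A pte thus carries the frame number in its top 32 bits and the
+ * protection code in bits 8..15; note that ALPHA_PTE_WRITE is simply
+ * (ALPHA_PTE_KW|ALPHA_PTE_UW) << ALPHA_PTE_PROTOFF == 0x9000.  A
+ * minimal packing sketch, assuming 8K pages (illustration only):
+ */
+#if 0
+static pt_entry_t make_pte_example(vm_offset_t pa)
+{
+	pt_entry_t pte;
+
+	pte  = ((pt_entry_t)(pa >> 13)) << 32;	/* pa_to_pte(): PFN */
+	pte |= ALPHA_PTE_WRITE;			/* KW|UW at PROTOFF */
+	pte |= ALPHA_PTE_VALID;
+	return pte;
+}
+#endif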
+
+#ifndef ASSEMBLER
+typedef volatile long cpu_set; /* set of CPUs - must be <= 64 */
+ /* changed by other processors */
+
+struct pmap {
+ pt_entry_t *dirbase; /* page directory pointer register */
+ int pid; /* TLBPID when in use */
+ int ref_count; /* reference count */
+ decl_simple_lock_data(,lock)
+ /* lock on map */
+ struct pmap_statistics stats; /* map statistics */
+ cpu_set cpus_using; /* bitmap of cpus using pmap */
+ int (*hacking)(); /* horrible things needed */
+};
+
+typedef struct pmap *pmap_t;
+
+#define PMAP_NULL ((pmap_t) 0)
+
+extern vm_offset_t kvtophys(vm_offset_t);
+extern void set_ptbr(/* pmap_t map, pcb_t pcb, boolean_t switchit */);
+
+#if NCPUS > 1
+/*
+ * List of cpus that are actively using mapped memory. Any
+ * pmap update operation must wait for all cpus in this list.
+ * Update operations must still be queued to cpus not in this
+ * list.
+ */
+extern cpu_set cpus_active;
+
+/*
+ * List of cpus that are idle, but still operating, and will want
+ * to see any kernel pmap updates when they become active.
+ */
+extern cpu_set cpus_idle;
+
+/*
+ * Quick test for pmap update requests.
+ */
+extern volatile
+boolean_t cpu_update_needed[NCPUS];
+
+/*
+ * External declarations for PMAP_ACTIVATE.
+ */
+
+void process_pmap_updates();
+void pmap_update_interrupt();
+extern pmap_t kernel_pmap;
+
+#endif /* NCPUS > 1 */
+
+/*
+ * Machine dependent routines that are used only for Alpha.
+ */
+
+pt_entry_t *pmap_pte();
+
+/*
+ * Macros for speed.
+ */
+
+#if NCPUS > 1
+
+/*
+ * For multiple CPUS, PMAP_ACTIVATE and PMAP_DEACTIVATE must manage
+ * fields to control TLB invalidation on other CPUS.
+ */
+
+#define PMAP_ACTIVATE_KERNEL(my_cpu) { \
+ \
+ /* \
+ * Let pmap updates proceed while we wait for this pmap. \
+ */ \
+ i_bit_clear((my_cpu), &cpus_active); \
+ \
+ /* \
+ * Lock the pmap to put this cpu in its active set. \
+ * Wait for updates here. \
+ */ \
+ simple_lock(&kernel_pmap->lock); \
+ \
+ /* \
+ * Process invalidate requests for the kernel pmap. \
+ */ \
+ if (cpu_update_needed[(my_cpu)]) \
+ process_pmap_updates(kernel_pmap); \
+ \
+ /* \
+ * Mark that this cpu is using the pmap. \
+ */ \
+ i_bit_set((my_cpu), &kernel_pmap->cpus_using); \
+ \
+ /* \
+ * Mark this cpu active - IPL will be lowered by \
+ * load_context(). \
+ */ \
+ i_bit_set((my_cpu), &cpus_active); \
+ \
+ simple_unlock(&kernel_pmap->lock); \
+}
+
+#define PMAP_DEACTIVATE_KERNEL(my_cpu) { \
+ /* \
+ * Mark pmap no longer in use by this cpu even if \
+ * pmap is locked against updates. \
+ */ \
+ i_bit_clear((my_cpu), &kernel_pmap->cpus_using); \
+}
+
+#define PMAP_ACTIVATE_USER(pmap, th, my_cpu) { \
+ register pmap_t tpmap = (pmap); \
+ register pcb_t pcb = (th)->pcb; \
+ \
+ if (tpmap == kernel_pmap) { \
+ /* \
+ * If this is the kernel pmap, switch to its page tables. \
+ */ \
+ set_ptbr(tpmap,pcb,TRUE); \
+ } \
+ else { \
+ /* \
+ * Let pmap updates proceed while we wait for this pmap. \
+ */ \
+ i_bit_clear((my_cpu), &cpus_active); \
+ \
+ /* \
+ * Lock the pmap to put this cpu in its active set. \
+ * Wait for updates here. \
+ */ \
+ simple_lock(&tpmap->lock); \
+ \
+ /* \
+ * No need to invalidate the TLB - the entire user pmap \
+ * will be invalidated by reloading dirbase. \
+ */ \
+ if (tpmap->pid < 0) pmap_assign_tlbpid(tpmap); \
+ set_ptbr(tpmap, pcb, TRUE); \
+ \
+ /* \
+ * Mark that this cpu is using the pmap. \
+ */ \
+ i_bit_set((my_cpu), &tpmap->cpus_using); \
+ \
+ /* \
+ * Mark this cpu active - IPL will be lowered by \
+ * load_context(). \
+ */ \
+ i_bit_set((my_cpu), &cpus_active); \
+ \
+ simple_unlock(&tpmap->lock); \
+ } \
+}
+
+#define PMAP_DEACTIVATE_USER(pmap, thread, my_cpu) { \
+ register pmap_t tpmap = (pmap); \
+ \
+ /* \
+ * Do nothing if this is the kernel pmap. \
+ */ \
+ if (tpmap != kernel_pmap) { \
+ /* \
+ * Mark pmap no longer in use by this cpu even if \
+ * pmap is locked against updates. \
+ */ \
+ i_bit_clear((my_cpu), &(pmap)->cpus_using); \
+ } \
+}
+
+#define MARK_CPU_IDLE(my_cpu) { \
+ /* \
+ * Mark this cpu idle, and remove it from the active set, \
+ * since it is not actively using any pmap. Signal_cpus \
+ * will notice that it is idle, and avoid signaling it, \
+ * but will queue the update request for when the cpu \
+ * becomes active. \
+ */ \
+ spl_t s = splvm(); \
+ i_bit_set((my_cpu), &cpus_idle); \
+ i_bit_clear((my_cpu), &cpus_active); \
+ splx(s); \
+}
+
+#define MARK_CPU_ACTIVE(my_cpu) { \
+ \
+ spl_t s = splvm(); \
+ /* \
+ * If a kernel_pmap update was requested while this cpu \
+ * was idle, process it as if we got the interrupt. \
+ * Before doing so, remove this cpu from the idle set. \
+ * Since we do not grab any pmap locks while we flush \
+ * our TLB, another cpu may start an update operation \
+ * before we finish. Removing this cpu from the idle \
+ * set assures that we will receive another update \
+ * interrupt if this happens. \
+ */ \
+ i_bit_clear((my_cpu), &cpus_idle); \
+ \
+ if (cpu_update_needed[(my_cpu)]) \
+ pmap_update_interrupt(); \
+ \
+ /* \
+ * Mark that this cpu is now active. \
+ */ \
+ i_bit_set((my_cpu), &cpus_active); \
+ splx(s); \
+}
+
+#else /* NCPUS > 1 */
+
+/*
+ * With only one CPU, we just have to indicate whether the pmap is
+ * in use.
+ */
+
+#define PMAP_ACTIVATE_KERNEL(my_cpu) { \
+ kernel_pmap->cpus_using = TRUE; \
+}
+
+#define PMAP_DEACTIVATE_KERNEL(my_cpu) { \
+ kernel_pmap->cpus_using = FALSE; \
+}
+
+#define PMAP_ACTIVATE_USER(pmap, th, my_cpu) { \
+ register pmap_t tpmap = (pmap); \
+ register pcb_t pcb = (th)->pcb; \
+ \
+ if (tpmap->pid < 0) pmap_assign_tlbpid(tpmap); \
+ set_ptbr(tpmap,pcb,TRUE); \
+ if (tpmap != kernel_pmap) { \
+ tpmap->cpus_using = TRUE; \
+ } \
+}
+
+#define PMAP_DEACTIVATE_USER(pmap, thread, cpu) { \
+ if ((pmap) != kernel_pmap) \
+ (pmap)->cpus_using = FALSE; \
+}
+
+#endif /* NCPUS > 1 */
+
+#define pmap_kernel() (kernel_pmap)
+#define pmap_resident_count(pmap) ((pmap)->stats.resident_count)
+#define pmap_phys_address(frame) ((vm_offset_t) (alpha_ptob(frame)))
+#define pmap_phys_to_frame(phys) ((alpha_btop(phys)))
+#define pmap_copy(dst_pmap,src_pmap,dst_addr,len,src_addr)
+#define pmap_attribute(pmap,addr,size,attr,value) \
+ (KERN_INVALID_ADDRESS)
+
+/*
+ * Data structures this module exports
+ */
+extern pmap_t kernel_pmap; /* pointer to the kernel pmap */
+
+
+#endif /* ASSEMBLER */
+
+/*
+ * We want to implement pmap_steal_memory and pmap_startup.
+ */
+
+#define MACHINE_PAGES
+
+#endif /* _PMAP_MACHINE_ */
diff --git a/alpha/alpha/prom_interface.S b/alpha/alpha/prom_interface.S
new file mode 100644
index 00000000..03d6c20e
--- /dev/null
+++ b/alpha/alpha/prom_interface.S
@@ -0,0 +1,162 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: prom_interface.s,v $
+ * Revision 2.4 93/05/20 21:02:04 mrt
+ * Changed use of zero to ra in call to NESTED.
+ * [93/05/18 mrt]
+ *
+ * Revision 2.3 93/03/09 10:50:49 danner
+ * Changed prom dispatching to be more link-safe.
+ * [93/02/20 af]
+ *
+ * Revision 2.2 93/02/05 08:00:04 danner
+ * Simplification to known code from af's boot code.
+ * [93/01/14 jeffreyh]
+ * Dispatch routine works now. Puts broken, Jeffrey fixed it
+ * so I am not [prom takes only 32bits anyways].
+ * [93/01/15 af]
+ * Added reference to doc for the HWRPB &co.
+ * [92/12/22 af]
+ * Added reference to documentation source(s).
+ * [92/12/16 15:18:00 af]
+ *
+ * Created.
+ * [92/06/02 af]
+ *
+ */
+/*
+ * File: prom_interface.s
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/92
+ *
+ * PROM entrypoints of interest to kernel
+ *
+ * This code was derived exclusively from information available in
+ * "Alpha Architecture Reference Manual", Richard L. Sites ed.
+ * Digital Press, Burlington, MA 01803
+ * ISBN 1-55558-098-X, Order no. EY-L520E-DP
+ *
+ * "VMS for Alpha Platforms Internals and Data Structures"
+ * Digital Press 1992, Burlington, MA 01803
+ * Order number EY-L466E-P1/2, ISBN 1-55558-095-5
+ * [Especially volume 1, chapter 33 "Bootstrap processing"]
+ */
+
+#include <mach/alpha/asm.h>
+#include <mach/alpha/alpha_instruction.h>
+#include <alpha/prom_interface.h>
+
+#include <platforms.h> /* disgusting for now */
+
+ .data
+ .align 4
+EXPORT(prom_dispatch_v) .quad 0,0
+
+ .text
+ .align 4
+
+/*
+ * Dispatcher routine. Implements prom's calling machinery,
+ * saves our callee-saved registers as required by C.
+ */
+#define D_RA (7*8)
+#define D_S0 (8*8)
+#define D_S1 (9*8)
+#define D_S2 (10*8)
+#define D_S3 (11*8)
+#define D_S4 (12*8)
+#define D_S5 (13*8)
+#define D_S6 (14*8)
+#define DISPATCH_FRAME_SIZE (15*8)
+#define DISPATCH_REGS IM_RA|IM_S0|IM_S1|IM_S2|IM_S3|IM_S4|IM_S5|IM_S6
+
+NESTED(prom_dispatch,5,DISPATCH_FRAME_SIZE,ra,DISPATCH_REGS,0)
+
+ ldgp gp,0(pv)
+
+ lda sp,-DISPATCH_FRAME_SIZE(sp)
+ stq ra,D_RA(sp)
+ stq s0,D_S0(sp)
+ stq s1,D_S1(sp)
+ stq s2,D_S2(sp)
+ stq s3,D_S3(sp)
+ stq s4,D_S4(sp)
+ stq s5,D_S5(sp)
+ stq s6,D_S6(sp)
+
+ /* Lord have mercy because.. I would not. */
+#define STUPID_PROM_IS_32_BITS (defined(ADU) || defined(FLAMINGO))
+
+
+#if STUPID_PROM_IS_32_BITS
+
+ ldah s0,0x2000(zero) /* hack for hack */
+ lda s0,(0x2000-8)(s0)
+
+ stq sp,0(s0)
+ or s0,zero,sp
+#endif /* STUPID_PROM_IS_32_BITS */
+
+
+ lda pv,prom_dispatch_v
+ ldq v0,0(pv) /* routine */
+ ldq pv,8(pv) /* routine_arg */
+
+ jsr ra,(v0)
+
+
+#if STUPID_PROM_IS_32_BITS
+
+ ldah s0,0x2000(zero) /* hack for hack */
+ lda s0,(0x2000-8)(s0)
+
+ ldq sp,0(s0)
+#endif /* STUPID_PROM_IS_32_BITS */
+
+
+ ldq ra,D_RA(sp)
+ ldq s0,D_S0(sp)
+ ldq s1,D_S1(sp)
+ ldq s2,D_S2(sp)
+ ldq s3,D_S3(sp)
+ ldq s4,D_S4(sp)
+ ldq s5,D_S5(sp)
+ ldq s6,D_S6(sp)
+ lda sp,DISPATCH_FRAME_SIZE(sp)
+ RET
+
+ END(prom_dispatch)
+
+
+/*
+ * Return to prom
+ */
+LEAF(prom_halt,0)
+ call_pal op_halt
+ br zero,prom_halt /* sanity */
+ END(prom_halt)
diff --git a/alpha/alpha/prom_interface.h b/alpha/alpha/prom_interface.h
new file mode 100644
index 00000000..fc2499db
--- /dev/null
+++ b/alpha/alpha/prom_interface.h
@@ -0,0 +1,322 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: prom_interface.h,v $
+ * Revision 2.4 93/08/10 15:15:37 mrt
+ * "..all the systems implemented the fields backwards, so that
+ * " MAJOR and MINOR were switched. The SRM was changed to match.."
+ * Consequently, we switched our definition in the struct per_cpu_slot.
+ * [93/08/06 af]
+ *
+ * Revision 2.3 93/03/09 10:50:46 danner
+ * Changed prom dispatching to be more link-safe.
+ * [93/02/20 af]
+ *
+ * Revision 2.2 93/02/05 07:59:57 danner
+ * Working version, taken from boot directory.
+ * [93/02/04 00:57:09 af]
+ *
+ * Added reference to doc for the HWRPB &co.
+ * [92/12/22 af]
+ * Added reference to documentation source(s).
+ * [92/12/16 15:17:51 af]
+ *
+ * Created.
+ * [92/06/02 af]
+ *
+ */
+/*
+ * File: prom_interface.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/92
+ *
+ * Functions and data structures that link the kernel
+ * to the prom environment.
+ *
+ * This code was derived exclusively from information available in
+ * "Alpha Architecture Reference Manual", Richard L. Sites ed.
+ * Digital Press, Burlington, MA 01803
+ * ISBN 1-55558-098-X, Order no. EY-L520E-DP
+ *
+ * "VMS for Alpha Platforms Internals and Data Structures"
+ * Digital Press 1992, Burlington, MA 01803
+ * Order number EY-L466E-P1/2, ISBN 1-55558-095-5
+ * [Especially volume 1, chapter 33 "Bootstrap processing"]
+ */
+
+/*
+ * In the MI code we only need halt, reboot, putchar, getenv
+ */
+#ifndef ASSEMBLER
+
+/*
+ * This is set up in init_prom_interface
+ */
+extern int alpha_console;
+
+extern struct {
+ int (*routine)();
+ struct console_routine_descriptor *routine_arg;
+} prom_dispatch_v;
+
+
+void prom_halt();
+void prom_reboot();
+integer_t prom_putchar( char );
+char *prom_getenv( char*);
+
+/*
+ * The (complicated) return value from a prom call
+ */
+typedef union {
+ struct {
+ unsigned long
+ retval : 32,
+ unit : 8,
+ mbz : 8,
+ error : 13,
+ status : 3;
+ } u;
+ long bits;
+} prom_return_t;
+
+
+#endif /* ASSEMBLER */
+
+/*
+ * Callback codes
+ */
+#define PROM_R_GETC 0x1
+#define PROM_R_PUTS 0x2 /* puts to console */
+#define PROM_R_SETENV 0x20 /* for reboot */
+#define PROM_R_GETENV 0x22
+
+
+/*
+ * What you can do with it
+ */
+
+ /* gets a character from console device no. X
+ ok status is 0 or 1 */
+#define prom_getc(x) prom_dispatch( PROM_R_GETC, x)
+
+ /* Print string Y of length Z on console no X
+ ok status is 0 or 1 */
+#define prom_puts(x,y,z) prom_dispatch( PROM_R_PUTS, x, y, z)
+
+ /* Copy environment variable X in buffer Y of length Z
+ ok status is 0 or 1 */
+#define prom_getenv(x,y,z) prom_dispatch( PROM_R_GETENV, x, y, z)
+
+ /* Change value of environment variable X to value Y of length Z
+ ok status is 0 */
+#define prom_setenv(x,y,z) prom_dispatch( PROM_R_SETENV, x, y, z)
+
+/*
+ * Which of the mandatory environment variables we need
+ */
+
+#define PROM_E_AUTO_ACTION 0x1 /* for reboot ? */
+#define PROM_E_BOOTED_DEV 0x4
+#define PROM_E_BOOTED_OSFLAGS 0x8
+#define PROM_E_TTY_DEV 0xf
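+
+/*
+ * Every call comes back as the prom_return_t defined above; callers
+ * check u.status ("ok" is 0 or 1 for these calls).  A minimal usage
+ * sketch (illustration only; it assumes u.retval carries the value
+ * length, and the 128-byte buffer is arbitrary):
+ */
+#if 0
+static int get_booted_dev_example(char buf[128])
+{
+	prom_return_t ret;
+
+	ret.bits = prom_getenv(PROM_E_BOOTED_DEV, buf, 128);
+	if (ret.u.status > 1)
+		return -1;		/* 0 or 1 means ok */
+	buf[ret.u.retval] = '\0';	/* assumed: length in retval */
+	return 0;
+}
+#endif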
+
+
+/*
+ * Restart block -- monitor support for "warm" starts
+ */
+
+#define RESTART_ADDR 0x10000000 /* prom restart block (virtual, at boot) */
+#define RESTART_CSUMCNT 0xc8 /* chksum this many bytes, as longs */
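+
+/*
+ * The comment above suggests a simple additive checksum over the first
+ * RESTART_CSUMCNT bytes, taken as longs.  A sketch of that reading
+ * (illustration only; the authoritative rule is the firmware's):
+ */
+#if 0
+static long restart_blk_cksum_example(const long *blk)
+{
+	long sum = 0;
+	unsigned int i;
+
+	for (i = 0; i < RESTART_CSUMCNT / sizeof(long); i++)
+		sum += blk[i];
+	return sum;		/* compare against the checksum field */
+}
+#endif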
+
+#ifndef ASSEMBLER
+
+struct restart_blk {
+ vm_offset_t my_phys_address;
+ char my_name[8]; /* "HWRPB" (magic number) */
+ natural_t my_version;
+ vm_size_t my_size;
+ natural_t primary_cpu_id;
+ vm_size_t page_size;
+ natural_t valid_phys_bits;
+ natural_t maximum_asn;
+ char system_serial_number[16];
+ natural_t system_type;
+ natural_t system_variation;
+ char system_revision[8]; /* first 4 valid */
+ vm_size_t clock_interrupt_frequency;
+ vm_size_t cycle_counter_resolution;
+ vm_offset_t virtual_pte_base;
+ integer_t reserved[1];
+ vm_offset_t tb_hint_block_offset;
+ natural_t num_processors;
+ vm_size_t percpu_slot_size;
+ vm_offset_t percpu_slots_offset;
+ vm_size_t ctb_count; /* 'console terminal block' */
+ vm_size_t ctb_size;
+ vm_offset_t ctb_offset;
+ vm_offset_t console_routine_block_offset;
+ vm_offset_t memory_data_descriptor_table_offset;
+ vm_offset_t config_data_block_offset;
+ vm_offset_t FRU_table_offset;
+ integer_t (*save_term_routine)();
+ integer_t save_term_routine_pv; /* procedure value */
+ integer_t (*restore_term_routine)();
+ integer_t restore_term_routine_pv;
+ integer_t (*restart_routine)();
+ integer_t restart_routine_pv;
+ integer_t reserved_for_os;
+ integer_t reserved_for_hw;
+ integer_t checksum;
+ integer_t ready_bitmasks[2]; /* VARSIZE */
+};
+
+#ifdef KERNEL
+extern struct restart_blk *alpha_hwrpb;
+#endif /* KERNEL */
+
+/*
+ * Defined system types
+ */
+#define SYSTEM_TYPE_ADU 1
+#define SYSTEM_TYPE_COBRA 2
+#define SYSTEM_TYPE_RUBY 3
+#define SYSTEM_TYPE_FLAMINGO 4
+#define SYSTEM_TYPE_MANNEQUIN 5
+#define SYSTEM_TYPE_JENSEN 6
+
+/*
+ * System variation bitfields
+ */
+
+#define SYSTEM_VAR_MPCAP 0x1 /* isa multiprocessor */
+
+#define SYSTEM_VAR_CONSOLE 0x1e /* what sort of console hw */
+# define SYSTEM_VAR_CNSL_DETACHED 0x2
+# define SYSTEM_VAR_CNSL_EMBEDDED 0x4
+
+#define SYSTEM_VAR_POWERFAIL 0xe0 /* powerfail provisions */
+# define SYSTEM_VAR_PF_UNITED 0x20
+# define SYSTEM_VAR_PF_SEPARATE 0x40
+# define SYSTEM_VAR_PF_BBACKUP 0x60
+
+#define SYSTEM_VAR_PF_ACTION 0x100 /* 1 -> restart all processors
+ on powerfail, 0 -> only primary */
+#define SYSTEM_VAR_GRAPHICS 0x200 /* do we have a graphic engine */
+#define SYSTEM_VAR_mbz 0xfffffffffffffc00
+
+struct console_routine_descriptor {
+ integer_t descriptor;
+ int (*code)();
+};
+
+struct console_routine_blk {
+
+ struct console_routine_descriptor
+ *dispatch_func_desc;
+ vm_offset_t dispatch_func_phys;
+
+ integer_t other_stuff[1]; /* which we do not care */
+};
+
+struct memory_data_descriptor_table {
+ integer_t checksum;
+ vm_offset_t implementation_specific_table_address; /* phys */
+ vm_size_t num_clusters;
+ struct mem_cluster {
+ vm_offset_t first_pfn;
+ vm_size_t num_pfn;
+ vm_size_t num_tested;
+ vm_offset_t bitmap_v_address;
+ vm_offset_t bitmap_p_address;
+ integer_t checksum;
+ integer_t usage;
+ } mem_clusters[1];
+};
+
+struct per_cpu_slot {
+ char hwpcb[128]; /* pal-dep */
+ natural_t state_flags;
+ vm_size_t palcode_memsize;
+ vm_size_t palcode_scratchsize;
+ vm_offset_t palcode_memory;
+ vm_offset_t palcode_scratch;
+ natural_t palcode_revision_info;
+ unsigned int processor_major_type;
+ unsigned int processor_minor_type;
+ natural_t processor_variation;
+ char processor_revision[8]; /* first 4 valid */
+ char processor_serial_number[16]; /* first 10 valid */
+ vm_offset_t logout_area;
+ vm_size_t logout_area_length;
+ vm_offset_t halt_pcbb; /* phys of PCB at halt */
+ vm_offset_t halt_pc;
+ natural_t halt_ps;
+ natural_t halt_r25;
+ natural_t halt_r26;
+ natural_t halt_r27;
+ natural_t halt_reason;
+ natural_t reserved_sw;
+ char mp_console_area[168]; /* +296d */
+ char architecture_specific[48]; /* +464d */
+ /* Total size 512 bytes minimum, trust hwrpb->slot_size */
+};
+
+/* State flags */
+
+#define PSTATE_BIP 0x1 /* boot in progress */
+#define PSTATE_RC 0x2 /* restart capable */
+#define PSTATE_PA 0x4 /* processor available */
+#define PSTATE_PP 0x8 /* processor present */
+#define PSTATE_OH 0x10 /* operator halted */
+#define PSTATE_CV 0x20 /* context valid */
+#define PSTATE_PV 0x40 /* palcode valid */
+#define PSTATE_PMV 0x80 /* palcode memory valid */
+#define PSTATE_PL 0x100 /* palcode loaded */
+#define PSTATE_HALT_REQ 0xff0000
+# define PSTATE_H_DEFAULT 0x000000
+# define PSTATE_H_SAVE_EXIT 0x010000
+# define PSTATE_H_COLD_BOOT 0x020000
+# define PSTATE_H_WARM_BOOT 0x030000
+# define PSTATE_H_STAY_HALTED 0x040000
+#define PSTATE_mbz 0xffffffffff00fe00
+
+/* Halt reasons */
+
+#define PHALT_START 0
+#define PHALT_SYSCRASH 1
+#define PHALT_KSTACK_INVALID 2
+#define PHALT_SCBB_INVALID 3
+#define PHALT_PTBR_INVALID 4
+#define PHALT_FROM_KERNEL 5
+#define PHALT_DOUBLE_ABORT 6
+
+#endif /* ASSEMBLER */
+
+
+
+
diff --git a/alpha/alpha/prom_routines.S b/alpha/alpha/prom_routines.S
new file mode 100644
index 00000000..21831ad7
--- /dev/null
+++ b/alpha/alpha/prom_routines.S
@@ -0,0 +1,125 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: prom_routines.s,v $
+ * Revision 2.4 93/05/20 21:03:04 mrt
+ * Changed use of zero to ra in call to NESTED.
+ * [93/05/18 mrt]
+ *
+ * Revision 2.3 93/03/09 10:49:42 danner
+ * Different prom dispatching, link safe.
+ * [93/03/05 af]
+ *
+ * Revision 2.2 93/02/05 08:01:14 danner
+ * Created a while back.
+ *
+ * [93/02/04 af]
+ *
+ */
+/*
+ * File: prom_routines.s
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 12/92
+ *
+ * PROM entrypoints of interest to kernel
+ *
+ * This code was derived exclusively from information available in
+ * "Alpha Architecture Reference Manual", Richard L. Sites ed.
+ * Digital Press, Burlington, MA 01803
+ * ISBN 1-55558-098-X, Order no. EY-L520E-DP
+ *
+ * "VMS for Alpha Platforms Internals and Data Structures"
+ * Digital Press 1992, Burlington, MA 01803
+ * Order number EY-L466E-P1/2, ISBN 1-55558-095-5
+ * [Especially volume 1, chapter 33 "Bootstrap processing"]
+ */
+
+#include <mach/alpha/asm.h>
+#include <mach/alpha/alpha_instruction.h>
+
+ .globl prom_dispatch_v
+ .comm prom_dispatch_v 16
+
+ .text
+ .align 4
+
+/*
+ * Dispatcher routine. Independent of prom's calling standard,
+ * saves our callee-saved registers as required by C.
+ */
+#define D_RA (7*8)
+#define D_S0 (8*8)
+#define D_S1 (9*8)
+#define D_S2 (10*8)
+#define D_S3 (11*8)
+#define D_S4 (12*8)
+#define D_S5 (13*8)
+#define D_S6 (14*8)
+#define DISPATCH_FRAME_SIZE (15*8)
+#define DISPATCH_REGS IM_RA|IM_S0|IM_S1|IM_S2|IM_S3|IM_S4|IM_S5|IM_S6
+
+NESTED(prom_dispatch,5,DISPATCH_FRAME_SIZE,ra,DISPATCH_REGS,0)
+
+ ldgp gp,0(pv)
+
+ lda sp,-DISPATCH_FRAME_SIZE(sp)
+ stq ra,D_RA(sp)
+ stq s0,D_S0(sp)
+ stq s1,D_S1(sp)
+ stq s2,D_S2(sp)
+ stq s3,D_S3(sp)
+ stq s4,D_S4(sp)
+ stq s5,D_S5(sp)
+ stq s6,D_S6(sp)
+
+ lda pv,prom_dispatch_v
+ ldq v0,0(pv) /* routine */
+ ldq pv,8(pv) /* routine_arg */
+
+ jsr ra,(v0)
+
+ ldq ra,D_RA(sp)
+ ldq s0,D_S0(sp)
+ ldq s1,D_S1(sp)
+ ldq s2,D_S2(sp)
+ ldq s3,D_S3(sp)
+ ldq s4,D_S4(sp)
+ ldq s5,D_S5(sp)
+ ldq s6,D_S6(sp)
+ lda sp,DISPATCH_FRAME_SIZE(sp)
+ RET
+
+ END(prom_dispatch)
+
+
+/*
+ * Return to prom
+ */
+LEAF(prom_halt,0)
+ call_pal op_halt
+ END(prom_halt)
+
diff --git a/alpha/alpha/prom_routines.h b/alpha/alpha/prom_routines.h
new file mode 100644
index 00000000..bc069395
--- /dev/null
+++ b/alpha/alpha/prom_routines.h
@@ -0,0 +1,83 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: prom_routines.h,v $
+ * Revision 2.2 93/02/05 08:01:07 danner
+ * Created a while back.
+ * [93/02/04 af]
+ *
+ */
+/*
+ * File: prom_routines.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 12/90
+ *
+ * PROM callback interface
+ */
+
+#ifndef _BOOT_PROM_ROUTINES_H_
+#define _BOOT_PROM_ROUTINES_H_ 1
+
+#include "../prom_interface.h"
+
+/*
+ * Which of the mandatory callbacks we need..
+ * besides what the kernel uses
+ */
+#define PROM_R_OPEN 0x10
+#define PROM_R_CLOSE 0x11
+#define PROM_R_READ 0x13
+
+/*
+ * The (extra) prom calls
+ */
+ /* Opens device X (a string of Y bytes)
+ ok status is 0 */
+#define prom_open(x,y) prom_dispatch( PROM_R_OPEN, x, y)
+
+ /* Close channel X
+ ok status is 0 */
+#define prom_close(x) prom_dispatch( PROM_R_CLOSE, x)
+
+ /* Read from channel X Y bytes at address W, blockno Z
+ ok status is 0 */
+#define prom_read(x,y,w,z) prom_dispatch( PROM_R_READ, x, y, w, z)
+
+/*
+ * A more friendly approach
+ */
+extern int console;
+#define puts(s) prom_puts(console, s, sizeof(s)-1)
+extern void putnum( unsigned long int );
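+
+/*
+ * A typical boot-loader sequence using the calls above (illustration
+ * only: the device string and block size are arbitrary, and the channel
+ * number is assumed to come back in u.retval).  Note that puts() only
+ * works on string literals, since sizeof(s)-1 must equal the length.
+ */
+#if 0
+static int read_first_block_example(char *buf)
+{
+	prom_return_t ret;
+	long chan;
+
+	ret.bits = prom_open("BOOT_DEV", 8);	/* name, name length */
+	if (ret.u.status)			/* ok status is 0 */
+		return -1;
+	chan = ret.u.retval;			/* assumed: channel here */
+	ret.bits = prom_read(chan, 512, buf, 0);	/* 512 bytes, block 0 */
+	(void) prom_close(chan);
+	return ret.u.status ? -1 : 0;
+}
+#endif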
+
+/*
+ * Which other of the mandatory environment variables we need
+ */
+
+#define PROM_E_BOOTED_FILE 0x6
+
+#endif /* _BOOT_PROM_ROUTINES_H_ */
diff --git a/alpha/alpha/setjmp.h b/alpha/alpha/setjmp.h
new file mode 100644
index 00000000..1f98e9ec
--- /dev/null
+++ b/alpha/alpha/setjmp.h
@@ -0,0 +1,48 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: setjmp.h,v $
+ * Revision 2.2 93/01/14 17:14:10 danner
+ * Copied from mips.
+ * [92/05/31 af]
+ *
+ */
+/*
+ * File: setjmp.h
+ * Author: David Golub, Carnegie Mellon University
+ * Date: 7/90
+ *
+ * Common name for setjmp/longjmp buffer.
+ */
+
+#ifndef _ALPHA_SETJMP_H_
+#define _ALPHA_SETJMP_H_
+
+#include <alpha/context.h> /* It's defined here */
+typedef jmp_buf jmp_buf_t; /* The expected name */
+
+#endif /* _ALPHA_SETJMP_H_ */
diff --git a/alpha/alpha/start.S b/alpha/alpha/start.S
new file mode 100644
index 00000000..a756ddb2
--- /dev/null
+++ b/alpha/alpha/start.S
@@ -0,0 +1,950 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: start.s,v $
+ * Revision 2.5 93/05/20 21:01:41 mrt
+ * Changed use of zero to ra in call to NESTED.
+ * [93/05/18 mrt]
+ *
+ * Revision 2.4 93/05/17 18:17:03 mrt
+ * Initialize stack pointer. Used if starting from console
+ * with start command. New firmware starts with sp set to 0.
+ * Fix from Michael Uhlenberg.
+ * [93/05/17 mrt]
+ *
+ * Revision 2.3 93/03/09 10:50:53 danner
+ * GP setup was wrong under GCC.
+ * [93/03/05 af]
+ *
+ * Revision 2.2 93/02/05 07:59:50 danner
+ * Change all mov calls to or calls due to strange chip bug
+ * on Flamingo with pass 2 chip
+ * [93/01/12 jeffreyh]
+ * Changed ADU-specific way to setup k0seg mappings into
+ * MI way to do it, using ldqp/stqp pal calls.
+ * [93/01/15 af]
+ * Got more memory on the ADU, bump NLEV3 up.
+ * [92/12/25 01:41:26 af]
+ *
+ * Added reference to doc for the HWRPB &co.
+ * [92/12/22 af]
+ * Added reference to documentation source(s).
+ * [92/12/16 15:18:08 af]
+ *
+ * Created.
+ * [92/06/03 af]
+ *
+ */
+
+/*
+ * File: start.s
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/92
+ *
+ * Kernel entry point
+ *
+ * This code was derived exclusively from information available in
+ * "Alpha Architecture Reference Manual", Richard L. Sites ed.
+ * Digital Press, Burlington, MA 01803
+ * ISBN 1-55558-098-X, Order no. EY-L520E-DP
+ *
+ * "VMS for Alpha Platforms Internals and Data Structures"
+ * Digital Press 1992, Burlington, MA 01803
+ * Order number EY-L466E-P1/2, ISBN 1-55558-095-5
+ * [Especially volume 1, chapter 33 "Bootstrap processing"]
+ */
+#include <cpus.h>
+#include <mach_kdb.h>
+
+#include <mach/alpha/asm.h>
+#include <mach/alpha/alpha_instruction.h>
+#include <alpha/trap.h>
+#include <alpha/alpha_scb.h>
+#include <alpha/prom_interface.h>
+#include <mach/alpha/vm_param.h>
+
+#include "assym.s"
+
+/*
+ * Object:
+ * alpha_scb EXPORTED SCB
+ *
+ * Exception and interrupt dispatching table.
+ *
+ * Since the SCB must be 8kpage aligned.. here it goes as well.
+ */
+
+ .globl TRAP_generic
+ .globl TRAP_interrupt
+ .globl stray_trap
+ .globl stray_interrupt
+
+#define t(code) .quad TRAP_generic,code
+#define i(code) .quad TRAP_interrupt,stray_interrupt
+#define u(code) .quad TRAP_interrupt,stray_trap
+
+ .globl TRAP_dispatcher
+
+#define d(handler) \
+ .globl handler;\
+ .quad TRAP_dispatcher,handler
+
+ .text
+ .set noreorder
+ .set noat
+
+ .globl alpha_scb
+
+alpha_scb:
+ /*
+ * On the ADU, this ends up being the kernel entry point.
+ * So we branch to the right place, and later on fix
+ * this first entry. Which is unused anyways.
+ */
+/*000 u(0) */
+ nop
+ br zero,start
+ .quad 0
+/*010*/
+ t(T_FPA_DISABLED)
+ u(2)
+ u(3)
+ u(4)
+ u(5)
+ u(6)
+ u(7)
+ t(T_PROT_FAULT)
+ t(T_TRANS_INVALID)
+ t(T_READ_FAULT)
+ t(T_WRITE_FAULT)
+ t(T_EXECUTE_FAULT)
+ u(13)
+ u(14)
+ u(15)
+/*100*/ u(16)
+ u(17)
+ u(18)
+ u(19)
+ u(20)
+ u(21)
+ u(22)
+ u(23)
+ u(24)
+ u(25)
+ u(26)
+ u(27)
+ u(28)
+ u(29)
+ u(30)
+ u(31)
+/*200*/ t(T_ARITHMETIC)
+ u(33)
+ u(34)
+ u(35)
+ t(T_AST_K)
+ t(T_AST_E)
+ t(T_AST_S)
+ t(T_AST_U)
+ t(T_UNALIGNED)
+ u(41)
+ u(42)
+ u(43)
+ u(44)
+ u(45)
+ u(46)
+ u(47)
+/*300*/ u(48)
+ u(49)
+ u(50)
+ u(51)
+ u(52)
+ u(53)
+ u(54)
+ u(55)
+ u(56)
+ u(57)
+ u(58)
+ u(59)
+ u(60)
+ u(61)
+ u(62)
+ u(63)
+#if MACH_KDB
+/*400*/ d(kdb_breakpoint)
+#else
+/*400*/ t(T_BP)
+#endif
+ t(T_BUG)
+ t(T_ILL)
+ t(T_PAL)
+ u(68) /* generate software trap ?? */
+ u(69)
+ u(70)
+ u(71)
+ t(T_CHMK)
+ t(T_CHME)
+ t(T_CHMS)
+ t(T_CHMU)
+ u(76)
+ u(77)
+ u(78)
+ u(79)
+/*500*/ u(80)
+ i( /* Software interrupt level 1, entry # */ SCB_SOFTCLOCK )
+ i( /* Software interrupt level 2, entry # */ 82 )
+ i( /* Software interrupt level 3, entry # */ 83 )
+ i( /* Software interrupt level 4, entry # */ 84 )
+ i( /* Software interrupt level 5, entry # */ 85 )
+ i( /* Software interrupt level 6, entry # */ 86 )
+ i( /* Software interrupt level 7, entry # */ 87 )
+ i( /* Software interrupt level 8, entry # */ 88 )
+ i( /* Software interrupt level 9, entry # */ 89 )
+ i( /* Software interrupt level 10, entry # */ 90 )
+ i( /* Software interrupt level 11, entry # */ 91 )
+ i( /* Software interrupt level 12, entry # */ 92 )
+ i( /* Software interrupt level 13, entry # */ 93 )
+ i( /* Software interrupt level 14, entry # */ 94 )
+ i( /* Software interrupt level 15, entry # */ 95 )
+/*600*/ i( /* Interval clock interrupt */ SCB_CLOCK )
+ i( /* Interprocessor interrupt */ SCB_INTERPROC )
+ t(T_SCE)
+ t(T_PCE)
+ t(T_PFAIL)
+ u(101) /* perfmon ?? */
+ t(T_SCHECK)
+ t(T_PCHECK)
+ u(104)
+ u(105)
+ u(106)
+ u(107)
+ u(108)
+ u(109)
+ u(110)
+ u(111) /* passive release ?? */
+/*700*/ u(112)
+ u(113)
+ u(114)
+ u(115)
+ u(116)
+ u(117)
+ u(118)
+ u(119)
+ u(120)
+ u(121)
+ u(122)
+ u(123)
+ u(124)
+ u(125)
+ u(126)
+ u(127)
+/*800*/ i(0)
+ i(1)
+ i(2)
+ i(3)
+ i(4)
+ i(5)
+ i(6)
+ i(7)
+ i(8)
+ i(9)
+ i(10)
+ i(11)
+ i(12)
+ i(13)
+ i(14)
+ i(15)
+/*900*/ i(16)
+ i(17)
+ i(18)
+ i(19)
+ i(20)
+ i(21)
+ i(22)
+ i(23)
+ i(24)
+ i(25)
+ i(26)
+ i(27)
+ i(28)
+ i(29)
+ i(30)
+ i(31)
+/*a00*/ i(32)
+ i(33)
+ i(34)
+ i(35)
+ i(36)
+ i(37)
+ i(38)
+ i(39)
+ i(40)
+ i(41)
+ i(42)
+ i(43)
+ i(44)
+ i(45)
+ i(46)
+ i(47)
+/*b00*/ i(48)
+ i(49)
+ i(50)
+ i(51)
+ i(52)
+ i(53)
+ i(54)
+ i(55)
+ i(56)
+ i(57)
+ i(58)
+ i(59)
+ i(60)
+ i(61)
+ i(62)
+ i(63)
+/*c00*/ i(64)
+ i(65)
+ i(66)
+ i(67)
+ i(68)
+ i(69)
+ i(70)
+ i(71)
+ i(72)
+ i(73)
+ i(74)
+ i(75)
+ i(76)
+ i(77)
+ i(78)
+ i(79)
+/*d00*/ i(80)
+ i(81)
+ i(82)
+ i(83)
+ i(84)
+ i(85)
+ i(86)
+ i(87)
+ i(88)
+ i(89)
+ i(90)
+ i(91)
+ i(92)
+ i(93)
+ i(94)
+ i(95)
+/*e00*/ i(96)
+ i(97)
+ i(98)
+ i(99)
+ i(100)
+ i(101)
+ i(102)
+ i(103)
+ i(104)
+ i(105)
+ i(106)
+ i(107)
+ i(108)
+ i(109)
+ i(110)
+ i(111)
+/*f00*/ i(112)
+ i(113)
+ i(114)
+ i(115)
+ i(116)
+ i(117)
+ i(118)
+ i(119)
+ i(120)
+ i(121)
+ i(122)
+ i(123)
+ i(124)
+ i(125)
+ i(126)
+ i(127)
+/*1000*/ i(128)
+ i(129)
+ i(130)
+ i(131)
+ i(132)
+ i(133)
+ i(134)
+ i(135)
+ i(136)
+ i(137)
+ i(138)
+ i(139)
+ i(140)
+ i(141)
+ i(142)
+ i(143)
+/*1100*/ i(144)
+ i(145)
+ i(146)
+ i(147)
+ i(148)
+ i(149)
+ i(150)
+ i(151)
+ i(152)
+ i(153)
+ i(154)
+ i(155)
+ i(156)
+ i(157)
+ i(158)
+ i(159)
+/*1200*/ i(160)
+ i(161)
+ i(162)
+ i(163)
+ i(164)
+ i(165)
+ i(166)
+ i(167)
+ i(168)
+ i(169)
+ i(170)
+ i(171)
+ i(172)
+ i(173)
+ i(174)
+ i(175)
+/*1300*/ i(176)
+ i(177)
+ i(178)
+ i(179)
+ i(180)
+ i(181)
+ i(182)
+ i(183)
+ i(184)
+ i(185)
+ i(186)
+ i(187)
+ i(188)
+ i(189)
+ i(190)
+ i(191)
+/*1400*/ i(192)
+ i(193)
+ i(194)
+ i(195)
+ i(196)
+ i(197)
+ i(198)
+ i(199)
+ i(200)
+ i(201)
+ i(202)
+ i(203)
+ i(204)
+ i(205)
+ i(206)
+ i(207)
+/*1500*/ i(208)
+ i(209)
+ i(210)
+ i(211)
+ i(212)
+ i(213)
+ i(214)
+ i(215)
+ i(216)
+ i(217)
+ i(218)
+ i(219)
+ i(220)
+ i(221)
+ i(222)
+ i(223)
+/*1600*/ i(224)
+ i(225)
+ i(226)
+ i(227)
+ i(228)
+ i(229)
+ i(230)
+ i(231)
+ i(232)
+ i(233)
+ i(234)
+ i(235)
+ i(236)
+ i(237)
+ i(238)
+ i(239)
+/*1700*/ i(240)
+ i(241)
+ i(242)
+ i(243)
+ i(244)
+ i(245)
+ i(246)
+ i(247)
+ i(248)
+ i(249)
+ i(250)
+ i(251)
+ i(252)
+ i(253)
+ i(254)
+ i(255)
+/*1800*/ i(256)
+ i(257)
+ i(258)
+ i(259)
+ i(260)
+ i(261)
+ i(262)
+ i(263)
+ i(264)
+ i(265)
+ i(266)
+ i(267)
+ i(268)
+ i(269)
+ i(270)
+ i(271)
+/*1900*/ i(272)
+ i(273)
+ i(274)
+ i(275)
+ i(276)
+ i(277)
+ i(278)
+ i(279)
+ i(280)
+ i(281)
+ i(282)
+ i(283)
+ i(284)
+ i(285)
+ i(286)
+ i(287)
+/*1a00*/ i(288)
+ i(289)
+ i(290)
+ i(291)
+ i(292)
+ i(293)
+ i(294)
+ i(295)
+ i(296)
+ i(297)
+ i(298)
+ i(299)
+ i(300)
+ i(301)
+ i(302)
+ i(303)
+/*1b00*/ i(304)
+ i(305)
+ i(306)
+ i(307)
+ i(308)
+ i(309)
+ i(310)
+ i(311)
+ i(312)
+ i(313)
+ i(314)
+ i(315)
+ i(316)
+ i(317)
+ i(318)
+ i(319)
+/*1c00*/ i(320)
+ i(321)
+ i(322)
+ i(323)
+ i(324)
+ i(325)
+ i(326)
+ i(327)
+ i(328)
+ i(329)
+ i(330)
+ i(331)
+ i(332)
+ i(333)
+ i(334)
+ i(335)
+/*1d00*/ i(336)
+ i(337)
+ i(338)
+ i(339)
+ i(340)
+ i(341)
+ i(342)
+ i(343)
+ i(344)
+ i(345)
+ i(346)
+ i(347)
+ i(348)
+ i(349)
+ i(350)
+ i(351)
+/*1e00*/ i(352)
+ i(353)
+ i(354)
+ i(355)
+ i(356)
+ i(357)
+ i(358)
+ i(359)
+ i(360)
+ i(361)
+ i(362)
+ i(363)
+ i(364)
+ i(365)
+ i(366)
+ i(367)
+/*1f00*/ i(368)
+ i(369)
+ i(370)
+ i(371)
+ i(372)
+ i(373)
+ i(374)
+ i(375)
+ i(376)
+ i(377)
+ i(378)
+ i(379)
+ i(380)
+ i(381)
+ i(382)
+ i(383)
+EXPORT(end_alpha_scb)
+
+/*
+ * Object:
+ * root_kpdes EXPORTED PTEs
+ *
+ * Kernel's root pagetable (seg1)
+ *
+ * This also needs aligned.
+ */
+
+ .globl root_kpdes
+root_kpdes:
+ .space ALPHA_PGBYTES
+
+/*
+ * Object:
+ * spage_ptes LOCAL PTEs
+ *
+ * Superpage mappings
+ *
+ * This also needs aligned.
+ */
+
+spage_ptes_2:
+ .space ALPHA_PGBYTES /* lev 2 */
+spage_ptes_3:
+ /* enough for 256 meg */
+#define NLEV3 32
+ .space 32*ALPHA_PGBYTES
+/*
+ * Should use NLEV3 but the gcc preprocessor puts in a space
+ * after NLEV that the gcc as can't handle
+ *
+ * .space NLEV3*ALPHA_PGBYTES lev 3
+ */
+
+/*
+ * Object:
+ * boot_pcb EXPORTED PCB
+ *
+ * Initial HW pcb structure.
+ *
+ * This also needs aligned.
+ */
+
+ .globl boot_pcb
+boot_pcb:
+ .space 512
+
+
+
+ .text
+/*
+ * Object:
+ * start EXPORTED function
+ *
+ * Kernel start
+ *
+ * Arguments:
+ * first_free_pfn unsigned long
+ *
+ */
+NESTED(start,1,0,ra,0,0)
+
+ /*
+ * Setup gp pointer
+ */
+ br pv,1f
+1:
+ ldgp gp,0(pv)
+#if __GNU_AS__
+ setgp 0(pv)
+#endif
+ lda sp,0x20010000 /* for the moment ? */
+
+
+#if (NCPUS > 1)
+ br s5,1f
+ .globl processors_running
+processors_running: /* s5 points here */
+ .space (NCPUS*4)
+1:
+
+ /* Decide here who the primary is, cuz that is
+ the only one that can talk to the console */
+
+ call_pal op_mfpr_whami
+ s4addq v0,s5,s5
+ addq zero,1,a0 /* proc #0 --> 1 */
+ stl a0,0(s5)
+
+ lda a0,RESTART_ADDR /* HWRPB */
+ ldq s4,HWRPB_PRIMARY_ID(a0)
+ subq v0,s4,s4
+ beq s4,1f
+
+ /* secondary spins here a bit, to let primary set pagetables */
+ ldah t0,0x7ff(zero)
+aa: lda t0,-1(t0)
+ bne t0,aa
+ br zero,all_cpus
+1:
+#endif /* NCPUS > 1 */
+
+ /*
+ * Make sure the superpage is working for us
+ */
+
+	/* This is needed on the ADU with EV4-pass1 chips, which have
+	   no superpage, and the VMS PAL code does not emulate it.
+	   So we set up the 1:1 mappings of the superpage ourselves */
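+	/* Each PTE built below carries the page frame number in its
+	   high longword (PFN << 32) and the valid/protection bits
+	   (the 0x1111 constant) in its low longword */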
+
+ /* get PT base */
+ call_pal op_mfpr_ptbr
+ sll v0,13,s1 /* PFN -> phys */
+
+ /* find PFN of spage_ptes_2 */
+ lda t0,spage_ptes_2 /* assumes GP ok */
+ zap t0,0xe0,s2 /* k0seg -> phys */
+ srl s2,13,t0 /* phys -> PFN */
+
+ /* build a pte */
+ lda t1,0x1111(zero) /* protections */
+ sll t0,32,t0 /* shift PFN into place */
+ addq t0,t1,a1 /* pte complete */
+ lda a0,(8*0x200)(s1) /* pdenum(fffffc00..00) */
+ call_pal op_stqp /* 0x4 */
+ /* mapped lev2 page, now on to lev3 */
+
+ /* find PFN of spage_ptes_3 */
+ lda t0,spage_ptes_3
+ zap t0,0xe0,s3 /* k0seg -> phys */
+ srl s3,13,t0 /* phys -> PFN */
+
+ /* now fill in lev2 page */
+ lda t3,NLEV3(zero)
+ or s2,zero,a0 /* lev2`s phys */
+flv2:
+ sll t0,32,a1
+ addq a1,t1,a1 /* pte built */
+ call_pal op_stqp /* stick it in */
+
+ subq t3,1,t3 /* one less to go */
+ addq a0,8,a0 /* next ppte */
+ addq t0,1,t0 /* next PFN */
+ bne t3,flv2 /* mapped all lev2 pages ? */
+
+ /* now fill in lev3 pages */
+ lda t3,NLEV3(zero)
+ sll t3,10,t3 /* nptes to fill */
+ lda t0,0(zero) /* PFN==0 */
+ or s3,zero,a0 /* lev3`s phys */
+flv3:
+ sll t0,32,a1
+ addq a1,t1,a1 /* pte built */
+ call_pal op_stqp
+
+ subq t3,1,t3 /* done this one */
+ addq a0,8,a0 /* ppte++ */
+ addq t0,1,t0 /* PFN++ */
+ bne t3,flv3 /* mapped all lev3 pages ? */
+
+ /* Done mapping 1:1
+ Now, since we'll be using root_kpdes as
+ our lev1, copy entries over in anticipation
+ of context-switching soon afterwards */
+
+ lda t0,root_kpdes
+ lda t2,ALPHA_PGBYTES(zero)
+ or s1,zero,a0
+cp:
+ call_pal op_ldqp /* 0x3 */
+ stq v0,0(t0) /* use kseg0 */
+ subq t2,8,t2 /* one entry copied */
+ addq t0,8,t0 /* to_ptep++ */
+ addq a0,8,a0 /* from_ptep++ */
+ bne t2,cp
+
+ /*
+ * Get HWRPB address in k0seg, to be indep of the
+	 * mappings the console has set up for us, which
+	 * we will get rid of pretty soon (cuz useg)
+ */
+ IMPORT(alpha_hwrpb,8)
+
+ lda a0,RESTART_ADDR
+ ldq a0,0(a0)
+ lda t0,alpha_hwrpb
+ stq a0,0(t0) /* physical! */
+
+all_cpus:
+
+#if 0 /* debug */
+ ldl a0,0(s5)
+ addq a0,1,a0
+ stl a0,0(s5)
+#endif
+
+ /*
+ * Disable FPA (just in case)
+ */
+ or zero,zero,a0
+#if 0
+ /* COMPILER USES FLOATS FOR VARARGS by default. CAVEAT */
+ addq zero,1,a0
+#endif
+ call_pal op_mtpr_fen
+
+ /*
+ * Let processor know where the SCB is
+ */
+ lda a0,alpha_scb
+ /* map to physical .. */
+ zapnot a0,0xf,a0
+ srl a0,13,a0 /* .. pfn */
+ call_pal op_mtpr_scbb
+
+#if (NCPUS > 1)
+ beq s4,1f
+
+ CALL(alpha_slave_start)
+ /* NOTREACHED */
+ call_pal op_halt
+
+1:
+#endif
+
+ /*
+ * Hop onto boot stack
+ */
+ lda sp,bootstack
+
+ /* Let debugger know if we (might) have a non-coff symtab */
+#if __GNU_AS__
+ addq zero,1,a0
+#else
+ or zero,zero,a0
+#endif
+
+ CALL(alpha_init)
+
+ /* NOTREACHED */
+ call_pal op_halt
+
+ END(start)
+
+#if (NCPUS > 1)
+ .data
+ .globl slave_init_lock
+slave_init_lock: .long 1
+ .text
+ .align 4
+
+/* In assembly because no stack! */
+LEAF(alpha_slave_start,0)
+ ldgp gp,0(pv)
+ or pv,zero,sp /* sanity */
+ lda a0,slave_init_lock
+1: ldq a1,0(a0)
+ bne a1,1b
+ ldq_l a1,0(a0)
+ addq zero,1,a2
+ stq_c a2,0(a0)
+ beq a2,1b
+ /*
+ * Hop onto boot stack, and go
+ */
+ lda sp,bootstack
+#if 1 /* DEBUG */
+ call_pal op_mfpr_whami
+ stq v0,-8(sp)
+ lda sp,-8(sp)
+#endif
+
+ CALL(alpha_slave_init)
+ /* NOTREACHED */
+ call_pal op_halt
+
+ END(alpha_slave_start)
+
+#endif /* NCPUS > 1 */
+
+/*
+ * Object:
+ * bootstack LOCAL stack
+ *
+ * Initial boot stack
+ *
+ */
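+/*
+ * Note that the stack grows downward: the `bootstack' label below
+ * marks its high end, which is where start() loads sp.
+ */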
+
+bootstack_end:
+ .align 4
+ .space 2*8192
+bootstack: .quad 0
+
+
+/*
+ * Compilerfix: BL6 does not get edata right if no .sdata
+ */
+
+ .sdata
+ .long 0
diff --git a/alpha/alpha/supage.S b/alpha/alpha/supage.S
new file mode 100644
index 00000000..1c3075f3
--- /dev/null
+++ b/alpha/alpha/supage.S
@@ -0,0 +1,171 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: supage.s,v $
+ * Revision 2.2 93/02/05 08:01:25 danner
+ * First (working) draft. Still needs to go hunting the shadow
+ * for the ABOX_CTL register and *or* in our bits so that..
+ * [93/02/04 af]
+ *
+ */
+/*
+ * File: supage.s
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 12/92
+ *
+ * Enable super-page mappings for kernel.
+ */
+
+#if 0/*__osf__*/
+
+#include <machine/asm.h>
+#include <machine/regdef.h>
+#define op_imb 0x86
+#define op_ldqp 0x3
+#define op_stqp 0x4
+
+#else
+
+#include <mach/alpha/asm.h>
+#include <mach/alpha/alpha_instruction.h>
+
+#endif
+
+/*
+ * Object:
+ * enable_suppage EXPORTED routine
+ *
+ * Horrible things to make sure we got it
+ * Arguments:
+ * pal_dispatch_base vm_offset_t
+ *
+ * Takes the phys address of the PAL dispatch vector (PAL_BASE),
+ *	drops code down in PAL space to clobber pal_call 10, flushes
+ *	the Icache, invokes the downloaded code, restores the old code,
+ *	flushes the Icache again, and gets back to user. Simple.
+ */
+
+/* ICCSR, Ibox.
+ Enable Ipage through bit 41w (22r)
+ */
+#define iccsr 2
+#define iccsr_map 41
+
+/* ABOX_CTL, Abox.
+ Enable Dpages through bits 4 5
+ */
+#define abox_ctl 14
+#define aboxctl_spe1 4
+
+ .text
+ .globl my_pal_code
+my_pal_code:
+ /* The GNU assembler knows about PAL instructions */
+
+#ifdef __GNU_AS__
+ hw_mfpr/p t0, $iccsr /* get pt2, shadows iccsr */
+ lda t1,0x200(zero) /* iccsr or-val */
+
+ sll t1,32,t1
+ or t0,t1,t0 /* new iccsr value */
+
+ hw_mtpr/pi t0, $iccsr /* enable instrs suppage */
+ lda t2, 0x43e(zero) /* abox_ctl value */
+
+ hw_mtpr/a t2, $abox_ctl /* enable data suppages */
+ hw_rei /* 30d|zero,zero|1|0|0 */
+
+#else
+
+ .long 0x64210082
+ .long 0x205f0200
+ .long 0x48441722
+ .long 0x44220401
+ .long 0x742100a2
+ .long 0x207f043e
+ .long 0x7463004e
+ .long 0x7bff8000
+
+#endif
+
+#define MY_PAL_SIZE 8 /* instructions */
+
+
+/* What to clobber ? Privileged 10d (VMS2OSF) 'course! */
+#define CLOBBERED_CALL 10
+
+ .text
+LEAF(enable_suppage,1)
+ ldgp gp,0(pv)
+ lda sp,-48(sp)
+ stq ra,40(sp)
+ stq s0,32(sp)
+ stq a0,24(sp)
+
+ lda s0,1(zero) # first time through
+do_it_again:
+ ldq a0,24(sp)
+ lda s0,-1(s0)
+
+ /* Decide where to do the load/stores */
+ lda a0,(0x2000+(CLOBBERED_CALL<<6))(a0) # HW dispatch
+ lda a2,my_pal_code
+
+ /* Copy down + save up loop */
+ lda t0,MY_PAL_SIZE(zero)
+cplp:
+ ldq a1,0(a2)
+ lda t0,-2(t0)
+ call_pal op_ldqp # ldqp v0,0(a0)
+ stq v0,0(a2)
+ call_pal op_stqp # stqp a1,0(a0)
+ addq a0,8,a0
+ addq a2,8,a2
+ bgt t0,cplp
+
+ /* Now must flush Icache */
+ mb
+ call_pal op_imb
+
+ bne s0,did_it
+
+ /* Make call now.... Geronimo!! */
+ call_pal CLOBBERED_CALL
+ mb
+ stq t0,16(sp)
+
+	/* Well, we made it. Now loop again to restore the old PAL code */
+ beq s0,do_it_again
+
+did_it:
+ ldq v0,16(sp)
+ ldq s0,32(sp)
+ ldq ra,40(sp)
+ lda sp,48(sp)
+ ret
+ END(enable_suppage)
+
diff --git a/alpha/alpha/thread.h b/alpha/alpha/thread.h
new file mode 100644
index 00000000..56fe47b7
--- /dev/null
+++ b/alpha/alpha/thread.h
@@ -0,0 +1,223 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: thread.h,v $
+ * Revision 2.4 93/03/09 10:51:09 danner
+ * There was no indirection bug, thread->pcb truly is a pointer.
+ * [93/03/05 af]
+ *
+ * Revision 2.3 93/02/04 07:55:19 danner
+ * Missing indirection in user_regs declaration.
+ * [93/02/02 danner]
+ *
+ * Revision 2.2 93/01/14 17:14:28 danner
+ * Created, from mips version.
+ * [92/05/31 af]
+ *
+ */
+/*
+ * File: thread.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 5/92
+ *
+ * This file defines machine specific, thread related structures,
+ * variables and macros.
+ *
+ */
+
+#ifndef _ALPHA_THREAD_H_
+#define _ALPHA_THREAD_H_
+
+#if !defined(ASSEMBLER)
+
+#include <mach/alpha/alpha_instruction.h>
+#include <alpha/context.h>
+#include <alpha/frame.h>
+
+/*
+ * Kernel state. Saved and restored across context-switches
+ * inside the kernel. We can ignore caller-saved registers.
+ * Kept at the base of the thread's stack.
+ */
+
+struct alpha_kernel_state {
+ vm_offset_t s0; /* callee-saved */
+ vm_offset_t s1;
+ vm_offset_t s2;
+ vm_offset_t s3;
+ vm_offset_t s4;
+ vm_offset_t s5;
+ vm_offset_t s6;
+ vm_offset_t sp; /* stack pointer */
+ vm_offset_t pc; /* suspended program counter */
+};
+
+/*
+ * Machine state. Includes all machine registers and other
+ * field used by machine-level code to provide in software
+ * things that architectures other than ALPHA might provide
+ * in hardware, e.g. single-stepping. The FPA state is scheduled
+ * asyncronously and saved here also, on demand. Part of the pcb.
+ * We allocate space for this state as needed.
+ */
+
+struct alpha_sstep_state {
+ int ss_count; /* no. of breakpoints installed */
+ struct breakpoint {
+ vm_offset_t address; /* where */
+ alpha_instruction instruction; /* original inst. */
+ } ss_bp[2]; /* taken/nontaken sides of branch */
+};
+
+struct alpha_machine_state {
+ struct alpha_float_state *mfs; /* see mach/alpha/thread_status.h */
+ struct alpha_sstep_state *msss; /* single-stepping if present */
+};
+
+/*
+ * Saved state. Holds the state of user registers upon kernel entry
+ * (saved in pcb) and kernel registers for exceptions in kernel mode
+ * (saved on kernel stack).
+ */
+
+/* REVISE, BASED ON ACTUAL USE (best WB/CACHE behaviour) */
+struct alpha_saved_state {
+ struct hw_pcb hw_pcb; /* with usp */
+/* wline */
+ struct trap_frame
+ *framep; /* t1-t6, pc, ps */
+ vm_offset_t gp; /* global pointer */
+ vm_offset_t a0; /* argument 0 */
+ vm_offset_t a1; /* argument 1 */
+/* wline */
+ vm_offset_t a2; /* argument 2 */
+ vm_offset_t a3; /* argument 3 */
+ vm_offset_t a4; /* argument 4 */
+ vm_offset_t a5; /* argument 5 */
+/* wline */
+ vm_offset_t ra; /* return address */
+ vm_offset_t v0; /* return value 0 */
+ vm_offset_t t0; /* caller saved 0 */
+ vm_offset_t t7; /* caller saved 7 */
+/* wline */
+ vm_offset_t t8; /* caller saved 8 */
+ vm_offset_t t9; /* caller saved 9 */
+ vm_offset_t t10; /* caller saved 10 */
+ vm_offset_t t11; /* caller saved 11 */
+/* wline */
+ vm_offset_t t12; /* caller saved 12 */
+ vm_offset_t s0; /* callee saved 0 */
+ vm_offset_t s1; /* callee saved 1 */
+ vm_offset_t s2; /* callee saved 2 */
+/* wline */
+ vm_offset_t s3; /* callee saved 3 */
+ vm_offset_t s4; /* callee saved 4 */
+ vm_offset_t s5; /* callee saved 5 */
+ vm_offset_t s6; /* callee saved 6 */
+/* wline */
+ vm_offset_t at; /* assembler temporary */
+ vm_offset_t sp; /* stack pointer (if kernel) */
+ vm_offset_t bad_address; /* bad virtual address */
+ vm_offset_t cause; /* trap cause */
+
+ struct trap_frame
+ saved_frame; /* t1-t6, pc, ps */
+};
+
+/*
+ * At the base of a kernel stack is an "exception link" record.
+ * It contains the C calling sequence's argument save area.
+ * It also contains a pointer to the exception frame (alpha_saved_state).
+ * If the exception happened in user mode, then the exception frame
+ * is in the thread's pcb. If the exception happed in kernel mode,
+ * then the exception frame is further up the kernel stack.
+ */
+struct alpha_exception_link {
+ struct alpha_saved_state *eframe;/* pointer to exception frame */
+ struct trap_frame tf; /* HW saves regs here, and pc+ps */
+};
+
+/*
+ * Lives at the base of a kernel stack.
+ * The full arrangement is
+ * stack: ...
+ * struct alpha_exception_link
+ * struct alpha_kernel_state
+ * struct alpha_stack_base
+ * stack+KERNEL_STACK_SIZE:
+ */
+typedef struct pcb {
+ struct alpha_saved_state mss; /* includes hw_pcb, first! */
+ struct alpha_machine_state mms;
+ /* roundup, cuz HW wants it 128-byte aligned */
+ char pad[ 512 -
+ (sizeof(struct alpha_saved_state) +
+ sizeof(struct alpha_machine_state)) ];
+} *pcb_t; /* exported */
+
+struct alpha_stack_base {
+ vm_offset_t next; /* next stack on free list */
+ struct vm_page *page; /* page structure for this stack */
+ pcb_t pcb; /* pointer to our pcb */
+ /* align, cuz trap_frame will */
+ char pad[64-sizeof(vm_offset_t)-sizeof(struct vm_page*)-sizeof(pcb_t)];
+};
+
+#define USER_REGS(th) ((th)->pcb)
+
+#define STACK_MSB(stack) \
+ ((struct alpha_stack_base *)((stack) + KERNEL_STACK_SIZE) - 1)
+#define STACK_MEL(stack) \
+ ((struct alpha_exception_link *)STACK_MSB(stack) - 1)
+#define STACK_MKS(stack) \
+ ((struct alpha_kernel_state *)STACK_MEL(stack) - 1)
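+/*
+ * Illustrative expansion (a sketch, not in the original header):
+ * the three records are carved off the top of the stack, from high
+ * addresses down, e.g.
+ *
+ *	struct alpha_stack_base     *msb = STACK_MSB(stack);
+ *	struct alpha_exception_link *mel = STACK_MEL(stack);
+ *	struct alpha_kernel_state   *mks = STACK_MKS(stack);
+ *
+ * yields mks < mel < msb, with msb ending exactly at
+ * stack + KERNEL_STACK_SIZE.
+ */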
+
+/*
+ * Routine definitions
+ */
+#include <mach/kern_return.h>
+
+void pcb_init(), pcb_terminate(), pcb_collect();
+kern_return_t thread_setstatus(), thread_getstatus();
+void syscall_emulation_sync();
+
+#endif !defined(ASSEMBLER)
+
+/*
+ * Later on..
+ */
+#if 0
+#define current_thread() mfpr_....
+#endif
+
+/*
+ * We have our own alpha-specific implementations of
+ * stack_alloc_try/stack_alloc/stack_free/stack_statistics.
+ */
+#define MACHINE_STACK
+
+#endif _ALPHA_THREAD_H_
diff --git a/alpha/alpha/time_stamp.h b/alpha/alpha/time_stamp.h
new file mode 100644
index 00000000..51d2859e
--- /dev/null
+++ b/alpha/alpha/time_stamp.h
@@ -0,0 +1,44 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: time_stamp.h,v $
+ * Revision 2.2 93/01/14 17:14:33 danner
+ * Created bogus empty file.
+ * [92/05/31 af]
+ *
+ */
+/*
+ * File: time_stamp.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 5/92
+ */
+
+/*
+ * The ALPHA timestamp implementation uses the default, so we don't
+ * need to do anything here.
+ */
+
diff --git a/alpha/alpha/trap.c b/alpha/alpha/trap.c
new file mode 100644
index 00000000..97d96d56
--- /dev/null
+++ b/alpha/alpha/trap.c
@@ -0,0 +1,935 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1994,1993,1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS-IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon the
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * 6-Jul-94 David Golub (dbg) at Carnegie-Mellon University
+ * Added extern declaration of exception(), so that we do
+ * not lose the high 32 bits of a bad virtual address.
+ *
+ * $Log: trap.c,v $
+ * Revision 2.7 93/05/15 19:11:41 mrt
+ * machparam.h -> machspl.h
+ *
+ * Revision 2.6 93/03/09 10:51:42 danner
+ * Proto for Thread_syscall_return with GCC.
+ * [93/03/07 af]
+ * Fixed pcsample botch.
+ * [93/03/05 af]
+ *
+ * Revision 2.5 93/02/05 08:00:14 danner
+ * Added machine check handler (jeffreyh).
+ * [93/02/04 00:45:20 af]
+ *
+ * Revision 2.4 93/02/04 07:55:25 danner
+ * Added pc_sampling support
+ * [93/02/02 danner]
+ *
+ * Revision 2.3 93/01/19 09:00:01 danner
+ * Better MP printouts. Save more state before getting to ddb
+ * in crashes. Still cannot continue from a crash, though.
+ * [93/01/15 af]
+ *
+ * Revision 2.2 93/01/14 17:14:37 danner
+ * Added reference to documentation source(s).
+ * [92/12/16 15:18:18 af]
+ *
+ * Created.
+ * [92/12/10 15:06:12 af]
+ *
+ */
+/*
+ * File: trap.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/92
+ *
+ * Trap Handlers for ALPHA
+ *
+ * This code was derived exclusively from information available in
+ * "Alpha Architecture Reference Manual", Richard L. Sites ed.
+ * Digital Press, Burlington, MA 01803
+ * ISBN 1-55558-098-X, Order no. EY-L520E-DP
+ */
+
+#include <mach_pcsample.h>
+#include <mach_kdb.h>
+
+#include <machine/machspl.h> /* spl definitions */
+#include <mach/exception.h>
+#include <mach/vm_param.h>
+#include <mach/alpha/alpha_instruction.h>
+#include <kern/thread.h>
+#include <kern/zalloc.h>
+#include <vm/vm_kern.h>
+#include <alpha/ast.h>
+#include <alpha/alpha_cpu.h>
+#include <alpha/trap.h>
+
+#define DEBUG 1
+
+extern zone_t msss_zone;
+
+extern char copymsg_start[], copymsg_end[];
+extern int copymsg_error();
+
+/*
+ * Parameters to exception() are 64 bits.
+ */
+extern void exception(
+ integer_t type,
+ integer_t code,
+ integer_t subcode);
+
+#if MACH_KDB
+boolean_t debug_all_traps_with_kdb = FALSE;
+extern struct db_watchpoint *db_watchpoint_list;
+extern boolean_t db_watchpoints_inserted;
+
+#endif MACH_KDB
+
+void
+user_page_fault_continue(kr)
+ kern_return_t kr;
+{
+ if (kr == KERN_SUCCESS) {
+#if MACH_KDB
+ if (db_watchpoint_list &&
+ db_watchpoints_inserted) {
+ register thread_t self = current_thread();
+ register vm_map_t map = self->task->map;
+ register struct alpha_saved_state *mss =
+ &self->pcb->mss;
+
+db_printf("Fix trap.c & watchpoints");
+#if 0
+ if (((mss->cause & CAUSE_EXC_MASK) == EXC_TLBS) &&
+ db_find_watchpoint(map, mss->bad_address, mss))
+ (void) kdb_trap(mss, 2);
+#endif
+ }
+#endif MACH_KDB
+ thread_exception_return();
+ /*NOTREACHED*/
+ }
+
+#if MACH_KDB
+ if (debug_all_traps_with_kdb &&
+ kdb_trap(&current_thread()->pcb->mss, 1)) {
+ thread_exception_return();
+ /*NOTREACHED*/
+ }
+#endif MACH_KDB
+
+ exception(EXC_BAD_ACCESS, kr, current_thread()->pcb->mss.bad_address);
+ /*NOTREACHED*/
+}
+
+/*
+ * Object:
+ * trap EXPORTED function
+ *
+ * Handle exceptions and faults.
+ *
+ */
+boolean_t syscalltrace = FALSE;
+boolean_t debug_verbose = FALSE;
+void (*alpha_machine_check)() = 0;
+
+trap( struct alpha_saved_state *ss_ptr,
+ unsigned long r4,
+ unsigned long r5,
+ unsigned long cause)
+{
+ thread_t t;
+ kern_return_t ret;
+ vm_map_t map;
+ vm_offset_t vaddr;
+ int exc_type, exc_code;
+ struct trap_frame *tf;
+
+if (ss_ptr == 0) gimmeabreak();
+if ( (debug_verbose > 2) || ! (syscalltrace && (cause == T_CHMK)) )
+if (debug_verbose) db_printf("{[%d]trap[%x](%x %x %x %x)}\n",
+ cpu_number(), &t, ss_ptr, r4, r5, cause);
+
+ t = current_thread();
+
+ tf = ss_ptr->framep;
+ if (alpha_user_mode(tf->saved_ps))
+ goto user_mode_traps;
+
+ /*
+ * Trap while in Kernel mode
+ */
+ switch (cause) {
+ case T_PROT_FAULT:
+ case T_TRANS_INVALID:
+ vaddr = (vm_offset_t) r4;
+
+ /*
+ * If the current map is a submap of the kernel map,
+ * and the address is within that map, fault on that
+ * map. If the same check is done in vm_fault
+ * (vm_map_lookup), we may deadlock on the kernel map
+ * lock. [dbg]
+ */
+ if (t == THREAD_NULL) /* startup & sanity */
+ map = kernel_map;
+ else {
+ map = t->task->map;
+ if (vaddr < vm_map_min(map) ||
+ vaddr >= vm_map_max(map))
+ map = kernel_map;
+ }
+
+ /*
+ * Register r5 contains the MMF flags:
+ * 8000000000000000 write fault
+ * 0000000000000000 read fault
+ * 0000000000000001 I-fetch fault
+ */
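+		/* hence the sign test ((long)r5 < 0) below picks out
+		   the write-fault case, and r5 == 1 the I-fetch case */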
+ ret = vm_fault(map, trunc_page(vaddr),
+ (((long)r5) < 0)
+ ? VM_PROT_READ|VM_PROT_WRITE
+ : ((r5) ?
+ VM_PROT_READ|VM_PROT_EXECUTE:
+ VM_PROT_READ),
+ FALSE, FALSE, (void (*)()) 0);
+
+ if (ret == KERN_SUCCESS) {
+#if MACH_KDB
+ if (db_watchpoint_list &&
+ db_watchpoints_inserted &&
+ (((long)r5) < 0) &&
+ db_find_watchpoint((VM_MIN_ADDRESS <= vaddr) &&
+ (vaddr < VM_MAX_ADDRESS) &&
+ (t != THREAD_NULL) ?
+ t->task->map : kernel_map,
+ vaddr,
+ ss_ptr))
+ (void) kdb_trap(ss_ptr, 2);
+#endif MACH_KDB
+ return;
+ }
+
+ if (((vm_offset_t) copymsg_start <= tf->saved_pc) &&
+ (tf->saved_pc < (vm_offset_t) copymsg_end)) {
+ tf->saved_pc = (vm_offset_t) copymsg_error;
+ return;
+ }
+ if (t->recover) {
+ tf->saved_pc = t->recover;
+ t->recover = 0;
+ return;
+ }
+
+ break;
+ case T_SCHECK:
+ case T_PCHECK:
+		if (alpha_machine_check) {
+			/* XXX will need to take args if it is made real */
+			(*alpha_machine_check)();
+			return;
+		} else
+			panic("Machine Check without a handler!\n");
+ break;
+ case T_READ_FAULT:
+ case T_WRITE_FAULT:
+ case T_EXECUTE_FAULT:
+ default:
+ break;
+ }
+#if MACH_KDB
+ /* locore did not record the faulting SP, for speed */
+ ss_ptr->sp = (vm_offset_t)(tf + 1) + (tf->saved_ps >> 56);
+#endif
+ goto fatal;
+
+ /*
+ * Trap while in User mode
+ */
+user_mode_traps:
+ switch (cause) {
+ case T_PROT_FAULT:
+ case T_TRANS_INVALID:
+ vaddr = (vm_offset_t) r4;
+ ss_ptr->bad_address = vaddr;
+ ss_ptr->cause = cause;
+ map = t->task->map;
+
+ ss_ptr->saved_frame = *tf;
+
+ (void) vm_fault(map, trunc_page(vaddr),
+ (((long)r5) < 0)
+ ? VM_PROT_READ|VM_PROT_WRITE
+ : ((r5) ?
+ VM_PROT_READ|VM_PROT_EXECUTE:
+ VM_PROT_READ),
+ FALSE,
+ FALSE, user_page_fault_continue);
+ /*NOTREACHED*/
+ break;
+ case T_CHMK:
+ trap_syscall(ss_ptr, tf);
+ return;
+ case T_FPA_DISABLED:
+{ static int memo = 0;
+if (!memo++) db_printf("Remember to stress-test FPA usage\n");
+}
+ {
+ /*
+ * Make sure the thread does have
+ * a floating-point save area.
+ */
+ register struct alpha_float_state *mfs;
+
+ mfs = t->pcb->mms.mfs;
+ if (mfs == 0) {
+ pcb_fpa_init(t);
+ mfs = t->pcb->mms.mfs;
+ }
+
+ t->pcb->mms.mfs = (struct alpha_float_state *)
+ ((vm_offset_t)mfs | 1); /* inuse */
+ alpha_fpa_loadup(mfs); /* leaves fpa enabled */
+ return;
+ }
+
+ case T_BP: { /* Breakpoint */
+ register struct alpha_sstep_state *msss;
+
+ /*
+ * If single stepping, remove breakpoints
+ * to minimize damage to other fellow threads.
+ * Otherwise it is a real breakpoint.
+ */
+ msss = t->pcb->mms.msss;
+ if (msss) {
+ t->pcb->mms.msss = 0;
+ exc_code = EXC_BREAK_SSTEP;
+ did_sstep(msss);
+ zfree(msss_zone, (vm_offset_t) msss);
+ } else
+ exc_code = EXC_BREAK_BPT;
+
+ exc_type = EXC_BREAKPOINT;
+ break;
+ }
+
+ case T_AST_K:
+ case T_AST_E:
+ case T_AST_S:
+ case T_AST_U:
+ case T_ILL:
+ case T_PAL:
+ case T_CHME:
+ case T_CHMS:
+ case T_CHMU:
+ exc_type = EXC_BAD_INSTRUCTION;
+ exc_code = EXC_ALPHA_RESOPND;
+ break;
+
+ case T_UNALIGNED:
+ exc_type = EXC_BAD_INSTRUCTION;
+ exc_code = EXC_ALPHA_RESADDR;
+ break;
+
+ case T_ARITHMETIC:
+ exc_type = EXC_ARITHMETIC;
+ exc_code = r5 & 0x7f; /* exception summary param */
+ cause = r4; /* register write mask */
+ break;
+
+/*notyet*/
+ case T_BUG:
+
+ default:
+ goto fatal;
+ }
+#if MACH_KDB
+ ss_ptr->saved_frame = *tf;
+ ss_ptr->cause = cause;
+ if (debug_all_traps_with_kdb && kdb_trap(ss_ptr, 1))
+ return;
+#endif MACH_KDB
+
+ /* Deliver the exception */
+ exception(exc_type, exc_code, cause);
+ /*NOTREACHED*/
+ return; /* help for the compiler */
+
+fatal:
+ ss_ptr->saved_frame = *tf;
+#if MACH_KDB
+ ss_ptr->cause = cause;
+ ss_ptr->bad_address = r4; /* most likely */
+ /* XXX r5 ? */
+ if (kdb_trap(ss_ptr, 1))
+ return;
+#endif
+ splhigh();
+ printf("Fatal kernel trap: ...\n");
+ printf("pc = %x, va = %x, sp = %x\n",
+ tf->saved_pc, ss_ptr->bad_address, ss_ptr->sp);
+ halt_all_cpus(1);
+}
+
+/* temp */
+#include <kern/syscall_sw.h>
+
+#if DEBUG
+char *syscall_names[77] = {
+"kern_invalid", /* 0 */ /* Unix */
+"kern_invalid", /* 1 */ /* Unix */
+"kern_invalid", /* 2 */ /* Unix */
+"kern_invalid", /* 3 */ /* Unix */
+"kern_invalid", /* 4 */ /* Unix */
+"kern_invalid", /* 5 */ /* Unix */
+"kern_invalid", /* 6 */ /* Unix */
+"kern_invalid", /* 7 */ /* Unix */
+"kern_invalid", /* 8 */ /* Unix */
+"kern_invalid", /* 9 */ /* Unix */
+"task_self", /* 10 */ /* obsolete */
+"thread_reply", /* 11 */ /* obsolete */
+"task_notify", /* 12 */ /* obsolete */
+"thread_self", /* 13 */ /* obsolete */
+"kern_invalid", /* 14 */
+"kern_invalid", /* 15 */
+"kern_invalid", /* 16 */
+"evc_wait", /* 17 */
+"kern_invalid", /* 18 */
+"kern_invalid", /* 19 */
+"msg_send_trap", /* 20 */ /* obsolete */
+"msg_receive_trap", /* 21 */ /* obsolete */
+"msg_rpc_trap", /* 22 */ /* obsolete */
+"kern_invalid", /* 23 */
+"kern_invalid", /* 24 */
+"mach_msg_trap", /* 25 */
+"mach_reply_port", /* 26 */
+"mach_thread_self", /* 27 */
+"mach_task_self", /* 28 */
+"mach_host_self", /* 29 */
+"kern_invalid", /* 30 */
+"kern_invalid", /* 31 */
+"kern_invalid", /* 32 */
+"kern_invalid", /* 33 */
+"kern_invalid", /* 34 */
+"kern_invalid", /* 35 */
+"kern_invalid", /* 36 */
+"kern_invalid", /* 37 */
+"kern_invalid", /* 38 */
+"kern_invalid", /* 39 */
+"kern_invalid", /* 40 */
+"kern_invalid", /* 41 */
+"kern_invalid", /* 42 */
+"kern_invalid", /* 43 */
+"kern_invalid", /* 44 */
+"kern_invalid", /* 45 */
+"kern_invalid", /* 46 */
+"kern_invalid", /* 47 */
+"kern_invalid", /* 48 */
+"kern_invalid", /* 49 */
+"kern_invalid", /* 50 */
+"kern_invalid", /* 51 */
+"kern_invalid", /* 52 */
+"kern_invalid", /* 53 */
+"kern_invalid", /* 54 */
+"host_self", /* 55 */
+"null_port", /* 56 */
+"kern_invalid", /* 57 */
+"kern_invalid", /* 58 */
+ "swtch_pri", /* 59 */
+"swtch", /* 60 */
+"thread_switch", /* 61 */
+"kern_invalid", /* 62 */
+"kern_invalid", /* 63 */
+"syscall_vm_map", /* 64 */
+"syscall_vm_allocate", /* 65 */
+"syscall_vm_deallocate",/* 66 */
+"kern_invalid", /* 67 */
+"syscall_task_create", /* 68 */
+"syscall_task_terminate", /* 69 */
+"syscall_task_suspend", /* 70 */
+"syscall_task_set_special_port",/* 71 */
+"syscall_mach_port_allocate", /* 72 */
+"syscall_mach_port_deallocate", /* 73 */
+"syscall_mach_port_insert_right", /* 74 */
+"syscall_mach_port_allocate_name", /* 75 */
+"syscall_thread_depress_abort" /* 76 */
+};
+
+#endif
+
+trap_syscall( struct alpha_saved_state *ss_ptr,
+ struct trap_frame *tf)
+{
+ register mach_trap_t *callp;
+ natural_t callno;
+ register eml_dispatch_t eml;
+ register thread_t t = current_thread();
+
+ /*
+ * Syscall redirection
+ */
+ eml = t->task->eml_dispatch;
+ if (eml) {
+ register natural_t min, count;
+ register vm_offset_t eml_pc;
+
+ callno = ss_ptr->v0;
+ min = eml->disp_min;
+ count = eml->disp_count;
+ /* This math is tricky cuz unsigned & overflow */
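+		/* e.g. (illustrative numbers): disp_min = 100 and
+		   callno = 103 give min = 3, in range for a
+		   disp_count of 10; callno = 99 wraps around to a
+		   huge unsigned value and fails the test below */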
+ min = callno - min;
+ if ((count >= min) &&
+ ((eml_pc = eml->disp_vector[min]) != 0)) {
+#if 0
+ ss_ptr->v0 = ss_ptr->saved_frame.saved_pc;
+ ss_ptr->saved_frame.saved_pc = eml_pc;
+#else
+ register struct trap_frame *tf;
+
+ ss_ptr->v0 = ss_ptr->saved_frame.saved_pc;
+ ss_ptr->saved_frame.saved_pc = eml_pc;
+
+ tf = & STACK_MEL(active_stacks[cpu_number()])->tf;
+ ss_ptr->v0 = tf->saved_pc;
+ tf->saved_pc = eml_pc;
+#endif
+ return;
+ }
+ }
+
+ /*
+ * Native syscall
+ */
+ callno = -ss_ptr->v0;
+ if (callno > mach_trap_count)
+ goto invalid;
+
+ callp = &mach_trap_table[callno];
+
+ switch (callp->mach_trap_arg_count) {
+ case 0:
+#if DEBUG
+ if (syscalltrace)
+ db_printf("%s() ", syscall_names[callno]);
+#endif
+ ss_ptr->v0 = (*callp->mach_trap_function)();
+ break;
+ case 1:
+#if DEBUG
+ if (syscalltrace)
+ db_printf("%s(%x) ", syscall_names[callno],
+ ss_ptr->a0);
+#endif
+ ss_ptr->v0 = (*callp->mach_trap_function)
+ (ss_ptr->a0);
+ break;
+ case 2:
+#if DEBUG
+ if (syscalltrace)
+ db_printf("%s(%x,%x) ", syscall_names[callno],
+ ss_ptr->a0, ss_ptr->a1);
+#endif
+ ss_ptr->v0 = (*callp->mach_trap_function)
+ (ss_ptr->a0, ss_ptr->a1);
+ break;
+ case 3:
+#if DEBUG
+ if (syscalltrace)
+ db_printf("%s(%x,%x,%x) ", syscall_names[callno],
+ ss_ptr->a0, ss_ptr->a1, ss_ptr->a2);
+#endif
+ ss_ptr->v0 = (*callp->mach_trap_function)
+ (ss_ptr->a0, ss_ptr->a1, ss_ptr->a2);
+ break;
+ case 4:
+#if DEBUG
+ if (syscalltrace)
+ db_printf("%s(%x,%x,%x,%x) ", syscall_names[callno],
+ ss_ptr->a0, ss_ptr->a1, ss_ptr->a2, ss_ptr->a3);
+#endif
+ ss_ptr->v0 = (*callp->mach_trap_function)
+ (ss_ptr->a0, ss_ptr->a1, ss_ptr->a2, ss_ptr->a3);
+ break;
+ case 5:
+#if DEBUG
+ if (syscalltrace)
+ db_printf("%s(%x,%x,%x,%x,%x) ", syscall_names[callno],
+ ss_ptr->a0, ss_ptr->a1, ss_ptr->a2, ss_ptr->a3,
+ ss_ptr->a4);
+#endif
+ ss_ptr->v0 = (*callp->mach_trap_function)
+ (ss_ptr->a0, ss_ptr->a1, ss_ptr->a2, ss_ptr->a3,
+ ss_ptr->a4);
+ break;
+ case 6:
+#if DEBUG
+ if (syscalltrace)
+ db_printf("%s(%x,%x,%x,%x,%x,%x) ", syscall_names[callno],
+ ss_ptr->a0, ss_ptr->a1, ss_ptr->a2, ss_ptr->a3,
+ ss_ptr->a4, ss_ptr->a5);
+#endif
+ ss_ptr->v0 = (*callp->mach_trap_function)
+ (ss_ptr->a0, ss_ptr->a1, ss_ptr->a2, ss_ptr->a3,
+ ss_ptr->a4, ss_ptr->a5);
+ break;
+ case 7:
+#if DEBUG
+ if (syscalltrace)
+ db_printf("%s(%x,%x,%x,%x,%x,%x,%x) ", syscall_names[callno],
+ ss_ptr->a0, ss_ptr->a1, ss_ptr->a2, ss_ptr->a3,
+ ss_ptr->a4, ss_ptr->a5, ss_ptr->t0);
+#endif
+ ss_ptr->v0 = (*callp->mach_trap_function)
+ (ss_ptr->a0, ss_ptr->a1, ss_ptr->a2, ss_ptr->a3,
+ ss_ptr->a4, ss_ptr->a5, ss_ptr->t0);
+ break;
+ case 11:
+#if DEBUG
+ if (syscalltrace)
+ db_printf("%s(%x,%x,%x,%x,%x,%x,%x,%x,%x,%x,%x) ", syscall_names[callno],
+ ss_ptr->a0, ss_ptr->a1, ss_ptr->a2, ss_ptr->a3,
+ ss_ptr->a4, ss_ptr->a5, ss_ptr->t0,
+ tf->saved_r2, tf->saved_r3, tf->saved_r4,
+ tf->saved_r5 );
+#endif
+ ss_ptr->v0 = (*callp->mach_trap_function)
+ (ss_ptr->a0, ss_ptr->a1, ss_ptr->a2, ss_ptr->a3,
+ ss_ptr->a4, ss_ptr->a5, ss_ptr->t0,
+ tf->saved_r2, tf->saved_r3, tf->saved_r4,
+ tf->saved_r5 );
+ break;
+ default:
+invalid:
+#if DEBUG
+ if (syscalltrace)
+ db_printf("invalid syscallno -%d\n", callno);
+#endif
+ ss_ptr->v0 = KERN_FAILURE;
+ }
+#if DEBUG
+ if (syscalltrace) {
+ db_printf("-> %x\n", ss_ptr->v0);
+ if (ss_ptr->v0 && (syscalltrace > 1)) gimmeabreak();
+ }
+#endif
+}
+
+void thread_syscall_return(ret)
+ kern_return_t ret;
+{
+#if __GNUC__
+ extern void __volatile__ Thread_syscall_return( kern_return_t );
+#endif
+#if DEBUG
+ if (syscalltrace) {
+ db_printf("-> %x\n", ret);
+ if (ret && (syscalltrace > 1)) gimmeabreak();
+ }
+#endif
+ Thread_syscall_return(ret);
+ /* NOTREACHED */
+}
+
+/* --- */
+#define MAX_CHANS 32 /* grow as needed */
+struct {
+ void (*routine)();
+ natural_t argument;
+} interrupt_vector[MAX_CHANS];
+
+interrupt_dispatch(chan, routine, argument)
+ void (*routine)();
+ natural_t argument;
+{
+ if (chan >= MAX_CHANS) panic("interrupt_dispatch");
+ interrupt_vector[chan].routine = routine;
+ interrupt_vector[chan].argument = argument;
+}
+
+#if 1/*DEBUG*/
+interrupt(struct alpha_saved_state *ss_ptr, int chan)
+{
+ (*interrupt_vector[chan].routine)(interrupt_vector[chan].argument,
+ ss_ptr->framep->saved_ps);
+}
+#else
+interrupt(xx, chan)
+{
+ (*interrupt_vector[chan].routine)(interrupt_vector[chan].argument);
+}
+#endif
+/* --- */
+
+stray_interrupt( struct alpha_saved_state *ss_ptr,
+ unsigned long r4,
+ unsigned long r5,
+ unsigned long cause)
+{
+#if MACH_KDB
+ gimmeabreak();
+#endif
+}
+
+stray_trap( struct alpha_saved_state *ss_ptr,
+ unsigned long r4,
+ unsigned long r5,
+ unsigned long cause)
+{
+#if MACH_KDB
+ gimmeabreak();
+#endif
+ panic("Unexpected trap");
+}
+
+
+/*
+ * Object:
+ *	addrof_alpha_reg, getreg_val EXPORTED functions
+ *
+ *	Return the address or value of a register in the exception frame
+ *
+ */
+vm_size_t *addrof_alpha_reg(regn, ss_ptr)
+ register unsigned regn;
+ struct alpha_saved_state *ss_ptr;
+{
+ switch (regn) {
+ case 0: return &ss_ptr->v0;
+ case 1: return &ss_ptr->t0;
+ case 2: return &ss_ptr->saved_frame.saved_r2;
+ case 3: return &ss_ptr->saved_frame.saved_r3;
+ case 4: return &ss_ptr->saved_frame.saved_r4;
+ case 5: return &ss_ptr->saved_frame.saved_r5;
+ case 6: return &ss_ptr->saved_frame.saved_r6;
+ case 7: return &ss_ptr->saved_frame.saved_r7;
+ case 8: return &ss_ptr->t7;
+ case 9: return &ss_ptr->s0;
+ case 10: return &ss_ptr->s1;
+ case 11: return &ss_ptr->s2;
+ case 12: return &ss_ptr->s3;
+ case 13: return &ss_ptr->s4;
+ case 14: return &ss_ptr->s5;
+ case 15: return &ss_ptr->s6;
+ case 16: return &ss_ptr->a0;
+ case 17: return &ss_ptr->a1;
+ case 18: return &ss_ptr->a2;
+ case 19: return &ss_ptr->a3;
+ case 20: return &ss_ptr->a4;
+ case 21: return &ss_ptr->a5;
+ case 22: return &ss_ptr->t8;
+ case 23: return &ss_ptr->t9;
+ case 24: return &ss_ptr->t10;
+ case 25: return &ss_ptr->t11;
+ case 26: return &ss_ptr->ra;
+ case 27: return &ss_ptr->t12;
+ case 28: return &ss_ptr->at;
+ case 29: return &ss_ptr->gp;
+ case 30: return &ss_ptr->sp;
+ default: return 0;
+ }
+}
+
+vm_size_t
+getreg_val(regn, ss_ptr)
+ register unsigned regn;
+ struct alpha_saved_state *ss_ptr;
+{
+ if (regn >= 31)
+ return 0;
+ return *addrof_alpha_reg(regn,ss_ptr);
+}
+
+/*
+ * Object:
+ * thread_exception_return EXPORTED function
+ *
+ *	Return to user space, installing single-step breakpoints
+ *	first if the thread is being stepped.
+ *
+ */
+void
+thread_exception_return()
+{
+ register thread_t t = current_thread();
+ register pcb_t pcb = t->pcb;
+ register struct alpha_sstep_state *msss = pcb->mms.msss;
+
+ /*
+ * If single stepping, install breakpoints before
+ * getting back to user. This should be done as
+ * late as possible, to minimize the (unavoidable)
+ * interference with other threads in the same task.
+ * It would be nice to do it _after_ ASTs are taken.
+ */
+ if (msss)
+ prepare_sstep(msss, &pcb->mss);
+
+ thread_bootstrap_return();
+ /*NOTREACHED*/
+}
+
+/*
+ * Object:
+ * prepare_sstep LOCAL function
+ *
+ * Install breakpoints to realize single stepping in
+ * software. Either one or two will be needed, depending
+ * on whether we are at a branching instruction or not.
+ * Note that only the target thread should execute this.
+ *
+ */
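+/*
+ * E.g. (illustrative): for a `beq' two breakpoints are planted, one
+ * at the branch target (taken side) and one at pc+4 (fall-through
+ * side); for a non-branching instruction only the pc+4 one.
+ */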
+#define SSTEP_INSTRUCTION (0x00000080)
+
+prepare_sstep(msss, mss)
+ register struct alpha_sstep_state *msss;
+ register struct alpha_saved_state *mss;
+{
+ extern vm_offset_t branch_taken();
+ alpha_instruction ins;
+ alpha_instruction *brpc, *pc;
+ register struct breakpoint *bp = msss->ss_bp;
+
+ pc = (alpha_instruction *) mss->saved_frame.saved_pc;
+
+ /*
+ * NOTE: we might reenter because an AST stopped us on the way out
+ */
+ if (msss->ss_count)
+ return;
+
+ if (copyin(pc, &ins, sizeof ins))
+ return; /* no harm done */
+
+ if (isa_call(ins) || isa_branch(ins)) {
+ brpc = (alpha_instruction *)
+ branch_taken(ins, pc, getreg_val, mss);
+ if (brpc != pc) {
+ bp->address = (vm_offset_t) brpc;
+ if (copyin(brpc, &bp->instruction, sizeof ins))
+ goto seq; /* he'll get hurt */
+ if (poke_instruction(brpc, SSTEP_INSTRUCTION))
+ goto seq; /* ditto */
+ msss->ss_count++, bp++;
+ }
+ }
+seq:
+ pc += 1;
+ bp->address = (vm_offset_t) pc;
+ if (copyin(pc, &bp->instruction, sizeof ins))
+ return; /* he'll get hurt */
+ if (poke_instruction(pc, SSTEP_INSTRUCTION))
+ return;
+ msss->ss_count++;
+}
+
+/*
+ * Object:
+ * did_sstep LOCAL function
+ *
+ * Remove the breakpoints installed by the above function,
+ *	paying attention to what other threads might have done to
+ * the task's memory in the meantime. Yes, there _are_
+ * problems for this on a multiprocessor.
+ */
+did_sstep(msss)
+ register struct alpha_sstep_state *msss;
+{
+ alpha_instruction ins, *pc;
+ register struct breakpoint *bp = msss->ss_bp;
+
+ for (; msss->ss_count-- > 0; bp++) {
+ pc = (alpha_instruction *) bp->address;
+ if (copyin(pc, &ins, sizeof ins))
+ continue;
+ if (ins.bits != SSTEP_INSTRUCTION)
+ continue;
+ poke_instruction(pc, bp->instruction);
+ }
+ msss->ss_count = 0;
+}
+
+/*
+ * Object:
+ * poke_instruction LOCAL function
+ *
+ * Put an instruction in my address space. Does not
+ * change protections, but might fault in text pages.
+ */
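+/*
+ * The store goes through the K0SEG alias of the physical page, so
+ * a read-only text mapping is not an obstacle; the Icache flush
+ * afterwards makes the new instruction visible.
+ */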
+poke_instruction(loc, ins)
+ alpha_instruction *loc, ins;
+{
+ vm_map_t map = current_thread()->task->map;
+ vm_offset_t pa;
+ kern_return_t ret;
+again:
+ pa = pmap_extract(map->pmap, (vm_offset_t) loc);
+ if (pa == 0) {
+ ret = vm_fault(map, trunc_page(loc), VM_PROT_READ,
+ FALSE, FALSE, (void (*)()) 0);
+ if (ret != KERN_SUCCESS)
+ return 1;
+ goto again;
+ }
+ loc = (alpha_instruction *)(PHYS_TO_K0SEG(pa));
+ *loc = ins;
+ alphacache_Iflush();
+ return 0;
+}
+
+#if MACH_KDB
+/*
+ * Object:
+ * thread_kdb_return EXPORTED function
+ *
+ * Try for debugger, to user if ok.
+ *
+ */
+void
+thread_kdb_return()
+{
+ if (kdb_trap(&current_thread()->pcb->mss, 1)) {
+ thread_exception_return();
+ /*NOTREACHED*/
+ }
+}
+#endif MACH_KDB
+
+#if MACH_PCSAMPLE
+/*
+ * return saved state for interrupted user thread
+ */
+vm_offset_t
+interrupted_pc(thread_t t)
+{
+ register struct alpha_saved_state *mss;
+
+ mss = &(USER_REGS(t)->mss);
+ return mss->framep->saved_pc;
+}
+#endif /*MACH_PCSAMPLE*/
diff --git a/alpha/alpha/trap.h b/alpha/alpha/trap.h
new file mode 100644
index 00000000..265867c8
--- /dev/null
+++ b/alpha/alpha/trap.h
@@ -0,0 +1,72 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS-IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon the
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: trap.h,v $
+ * Revision 2.2 93/01/14 17:14:42 danner
+ * Created.
+ * [92/12/10 af]
+ *
+ */
+/*
+ * File: trap.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/92
+ *
+ *	Trap codes defined for ALPHA
+ */
+ /* pc incremented ? */
+#define T_FPA_DISABLED 1
+#define T_PROT_FAULT 2
+#define T_TRANS_INVALID 3
+#define T_READ_FAULT 4
+#define T_WRITE_FAULT 5
+#define T_EXECUTE_FAULT 6
+#define T_ARITHMETIC 7 /* yes */
+#define T_AST_K 8
+#define T_AST_E 9
+#define T_AST_S 10
+#define T_AST_U 11
+#define T_UNALIGNED 12 /* yes */
+#define T_BP 13 /* yes */
+#define T_BUG 14 /* yes */
+#define T_ILL 15 /* yes */
+#define T_PAL 16 /* yes */
+#define T_CHMK 17 /* yes */
+#define T_CHME 18 /* yes */
+#define T_CHMS 19 /* yes */
+#define T_CHMU 20 /* yes */
+#define T_SOFT_INT 21
+#define T_SCE 22
+#define T_PCE 23
+#define T_PFAIL 24
+#define T_SCHECK 25
+#define T_PCHECK 26
+ /* other, software defined */
+#define T_UNEXPECTED 27
+#define T_BAD_INTERRUPT 28
+
+#define TRAP_TYPES 28
diff --git a/alpha/alpha/vm_tuning.h b/alpha/alpha/vm_tuning.h
new file mode 100644
index 00000000..6c8fe8ed
--- /dev/null
+++ b/alpha/alpha/vm_tuning.h
@@ -0,0 +1,45 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: vm_tuning.h,v $
+ * Revision 2.2 93/01/14 17:15:01 danner
+ * Created, obsolete.
+ * [92/06/02 af]
+ *
+ */
+/*
+ * File: alpha/vm_tuning.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 6/92
+ *
+ * VM tuning parameters for the alpha.
+ */
+
+#ifndef _ALPHA_VM_TUNING_H_
+#define _ALPHA_VM_TUNING_H_
+
+#endif _ALPHA_VM_TUNING_H_
diff --git a/alpha/dec/ln_copy.c b/alpha/dec/ln_copy.c
new file mode 100644
index 00000000..fda36fcb
--- /dev/null
+++ b/alpha/dec/ln_copy.c
@@ -0,0 +1,288 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: ln_copy.c,v $
+ * Revision 2.2 93/03/09 10:48:25 danner
+ * Jeffrey Heller created this expounding from my mips code.
+ * [93/03/06 14:26:06 af]
+ *
+ */
+/*
+ * File: ln_copy.c
+ * Torn from: mips/PMAX/kn01.c and mips/PMAX/kn02ba.c
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 9/90
+ *
+ * Routines specific to the KN01 processor (pmax)
+ */
+
+#include <mach/std_types.h>
+
+/*
+ * Object:
+ * copyin_gap16 EXPORTED function
+ * copyout_gap16 EXPORTED function
+ * bzero_gap16 EXPORTED function
+ *
+ * Specialized memory copy/zero for pmax-like I/O memory
+ * Copyin moves data from lance to host memory,
+ * Copyout the other way around and Bzero you know.
+ *
+ */
+copyin_gap16(rbuf, dp, nbytes)
+ register volatile int *rbuf;
+ register unsigned short *dp;
+ register unsigned nbytes;
+{
+ register int nshorts;
+
+ /*
+ * Cannot use the normal bcopy since we are moving
+	 * from LANCE space into Pmax memory.
+	 * Use the "Duff device" with a 16-bit width.
+ */
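+	/* Each pass through the loop below copies up to 32 shorts,
+	   falling through the switch cases down to case 1 */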
+ while (nbytes) {
+ switch (nshorts = (nbytes >> 1)) {
+ default:
+ case 32:
+ nshorts = 32;
+ dp[31] = rbuf[31];
+ case 31: dp[30] = rbuf[30];
+ case 30: dp[29] = rbuf[29];
+ case 29: dp[28] = rbuf[28];
+ case 28: dp[27] = rbuf[27];
+ case 27: dp[26] = rbuf[26];
+ case 26: dp[25] = rbuf[25];
+ case 25: dp[24] = rbuf[24];
+ case 24: dp[23] = rbuf[23];
+ case 23: dp[22] = rbuf[22];
+ case 22: dp[21] = rbuf[21];
+ case 21: dp[20] = rbuf[20];
+ case 20: dp[19] = rbuf[19];
+ case 19: dp[18] = rbuf[18];
+ case 18: dp[17] = rbuf[17];
+ case 17: dp[16] = rbuf[16];
+ case 16: dp[15] = rbuf[15];
+ case 15: dp[14] = rbuf[14];
+ case 14: dp[13] = rbuf[13];
+ case 13: dp[12] = rbuf[12];
+ case 12: dp[11] = rbuf[11];
+ case 11: dp[10] = rbuf[10];
+ case 10: dp[9] = rbuf[9];
+ case 9: dp[8] = rbuf[8];
+ case 8: dp[7] = rbuf[7];
+ case 7: dp[6] = rbuf[6];
+ case 6: dp[5] = rbuf[5];
+ case 5: dp[4] = rbuf[4];
+ case 4: dp[3] = rbuf[3];
+ case 3: dp[2] = rbuf[2];
+ case 2: dp[1] = rbuf[1];
+ case 1: dp[0] = rbuf[0];
+ break;
+ case 0:
+ /* Last byte.
+ * This really happens. Kinetic boxes, for example,
+ * send 0x119 + 0t14+0t4 byte packets.
+ */
+ *(char *)dp = *(char *)rbuf++;
+ return;
+ }
+ rbuf += nshorts;
+ dp += nshorts;
+ nbytes -= nshorts << 1;
+ }
+}
+
+
+copyout_gap16(dp,sbuf,len)
+ register unsigned short *dp;
+ register volatile int *sbuf;
+ register int len;
+{
+ register int nshorts;
+
+ /*
+ * Cannot use the normal bcopy since we are moving
+ * from Pmax space into LANCE space.
+ * Use the "Duff device" with a 16bits width.
+ */
+ while (len) {
+ switch (nshorts = (len >> 1)) {
+ default:
+ case 32: nshorts = 32;
+ sbuf[31] = dp[31];
+ case 31: sbuf[30] = dp[30];
+ case 30: sbuf[29] = dp[29];
+ case 29: sbuf[28] = dp[28];
+ case 28: sbuf[27] = dp[27];
+ case 27: sbuf[26] = dp[26];
+ case 26: sbuf[25] = dp[25];
+ case 25: sbuf[24] = dp[24];
+ case 24: sbuf[23] = dp[23];
+ case 23: sbuf[22] = dp[22];
+ case 22: sbuf[21] = dp[21];
+ case 21: sbuf[20] = dp[20];
+ case 20: sbuf[19] = dp[19];
+ case 19: sbuf[18] = dp[18];
+ case 18: sbuf[17] = dp[17];
+ case 17: sbuf[16] = dp[16];
+ case 16: sbuf[15] = dp[15];
+ case 15: sbuf[14] = dp[14];
+ case 14: sbuf[13] = dp[13];
+ case 13: sbuf[12] = dp[12];
+ case 12: sbuf[11] = dp[11];
+ case 11: sbuf[10] = dp[10];
+ case 10: sbuf[9] = dp[9];
+ case 9: sbuf[8] = dp[8];
+ case 8: sbuf[7] = dp[7];
+ case 7: sbuf[6] = dp[6];
+ case 6: sbuf[5] = dp[5];
+ case 5: sbuf[4] = dp[4];
+ case 4: sbuf[3] = dp[3];
+ case 3: sbuf[2] = dp[2];
+ case 2: sbuf[1] = dp[1];
+ case 1: sbuf[0] = dp[0];
+ break;
+ case 0: {
+ /* Last byte of this buffer */
+ register unsigned short c;
+
+ wbflush();
+ c = *(unsigned short*)sbuf;
+#if BYTE_MSF
+ sbuf[0] = (c & 0x00ff) | ((*((unsigned char *)dp))<<8);
+#else /*BYTE_MSF*/
+ sbuf[0] = (c & 0xff00) | *((unsigned char *)dp);
+#endif /*BYTE_MSF*/
+ return;
+ }
+ }
+ sbuf += nshorts;
+ dp += nshorts;
+ len -= (nshorts << 1);
+ }
+}
+
+bzero_gap16(addr, len)
+ vm_offset_t addr;
+ vm_size_t len;
+{
+ /* no big deal if we zero twice */
+ bzero(addr, len << 1);
+}
+
+
+/*
+ * Object:
+ * copyout_gap32 EXPORTED function
+ *
+ * Copy data to lance (data) buffer: dma is 4 words every
+ * other 4 words. We know: 'to' is aligned to a 4 words
+ * boundary, 'from' is short-aligned. Too bad if we
+ * copy a bit too much.
+ */
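+/*
+ * Hence the "dma twist" below: `to' advances by 8 words for every
+ * 4 words actually stored, skipping the unused half of each 8-word
+ * group.
+ */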
+copyout_gap32(from, to, len)
+ register unsigned int *from;
+ register unsigned int *to;
+ register int len;
+{
+ register unsigned int t0,t1,t2,t3;
+
+ if ((((vm_offset_t)from) & 0x2) == 0)
+ goto good_day;
+
+ /* a bad day, only happens on small inline (arp) pkts */
+ while (len > 0) {
+ *((short *)to) = *((short*)from);
+ to = (unsigned int *)((char*)to + 2);
+ from = (unsigned int *)((char*)from + 2);
+ len -= 2;
+ if (((vm_offset_t)to & 0xf) == 0)
+ to += 4;/*dma twist*/
+ }
+ return;
+
+good_day:
+ while (len > 0) {
+ t0 = from[0]; t1 = from[1]; t2 = from[2]; t3 = from[3];
+ from += 4;
+ to[0] = t0; to[1] = t1; to[2] = t2; to[3] = t3;
+ to += 8;/*dma twist!*/
+ len -= 4 * sizeof(int);
+ }
+}
+
+
+/*
+ * Object:
+ * copyin_gap32 EXPORTED function
+ *
+ * Copy data from lance (data) buffer: dma is 4 words every
+ * other 4 words. Called in two modes: (a) for the ether header
+ * which is 14 bytes, word aligned (b) for the data, which
+ * is any size but the source is short aligned (triple sigh).
+ * Destinations are word aligned in both cases
+ */
+copyin_gap32(from, to, len)
+ register unsigned int *from;
+ register unsigned int *to;
+ register int len;
+{
+ /* no need for generalities, just do it fast */
+ if (len <= 16) {
+ /* ether header */
+ register int t0,t1,t2,t3;
+
+ t0 = from[0]; t1 = from[1]; t2 = from[2]; t3 = from[3];
+ to[0] = t0; to[1] = t1; to[2] = t2; to[3] = t3;
+
+ } else {
+ /* data */
+ register unsigned int t0,t1,t2,t3;
+ register unsigned short s0;
+
+ s0 = *(unsigned short *)from;
+ from = (unsigned int *)(((short*)from) + 1); /* aligned now */
+ from += 4; /* skipto */
+ len -= sizeof(short);
+
+ while (len > 0) {
+ t0 = from[0]; t1 = from[1]; t2 = from[2]; t3 = from[3];
+ from += 8;/*dma twist!*/
+ /* byteorderdep */
+ to[0] = s0 | (t0 << 16);
+ to[1] = (t0 >> 16) | (t1 << 16);
+ to[2] = (t1 >> 16) | (t2 << 16);
+ to[3] = (t2 >> 16) | (t3 << 16);
+ s0 = t3 >> 16;
+ to += 4;
+ len -= 4 * sizeof(int);
+ }
+ *(unsigned short *)to = s0;
+ }
+}
+
diff --git a/alpha/include/mach/alpha/alpha_instruction.h b/alpha/include/mach/alpha/alpha_instruction.h
new file mode 100644
index 00000000..9635f1dd
--- /dev/null
+++ b/alpha/include/mach/alpha/alpha_instruction.h
@@ -0,0 +1,690 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1993,1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: alpha_instruction.h,v $
+ * Revision 2.3 93/01/19 09:01:18 danner
+ * Added some, removed some PAL calls.
+ * Chosed not to put here PAL instructions (for ddb)
+ * because I am sadly afraid they might be chip-specific.
+ * We'll see.
+ * [93/01/15 af]
+ *
+ * Revision 2.2 93/01/14 17:40:36 danner
+ * Created.
+ * [91/12/05 af]
+ *
+ */
+
+/*
+ * File: alpha_instruction.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 11/91
+ *
+ * Alpha Instruction set definition
+ *
+ * Reference: "Alpha System Reference Manual", V4.0, April 1991
+ *
+ */
+
+#ifndef _ALPHA_INSTRUCTION_H_
+#define _ALPHA_INSTRUCTION_H_ 1
+
+#if !defined(ASSEMBLER)
+
+/*
+ * All instructions are in one of five formats:
+ * Memory, Branch, Operate, Floating-point Operate, PAL
+ *
+ * One minor departure from DEC's conventions is we use names
+ * for registers that are more akin to their software use, e.g.
+ * rather than bluntly calling them Ra/Rb/Rc we make clear which
+ * one is a source (Rs) and which one is a destination (Rd).
+ * When a second source register is defined we call it Rt.
+ */
+
+
+typedef union {
+ /*
+ * All instructions are 32 bits wide
+ */
+ unsigned int bits;
+
+ /*
+ * Memory instructions contain a 16 bit
+ * signed immediate value and two register
+ * specifiers
+ */
+ struct {
+ signed short displacement;
+ unsigned rs : 5,
+ rd : 5,
+ opcode : 6;
+ } mem_format;
+
+ /*
+	 * Branch instructions contain a 21 bit offset,
+ * which is sign-extended, shifted and combined
+ * with the PC to form a 64 bit destination address.
+ *
+ * In computed jump instructions the opcode is further
+ * specified in the offset field, the rest of it is
+ * used as branch target hint. The destination of the
+ * jump is the source register.
+ */
+ struct {
+ signed int displacement : 21;
+ unsigned rd : 5,
+ opcode : 6;
+ } branch_format;
+
+ struct {
+ signed int hint : 14;
+ unsigned action : 2,
+ rs : 5,
+ rd : 5,
+ opcode : 6;
+ } jump_format;
+
+
+ /*
+ * Operate instructions are of two types, with
+ * a second source register or with a literal
+ * specifier. Bit 12 sez which is which.
+ */
+ struct {
+ unsigned rd : 5,
+ function : 7,
+ sbz : 4,
+ rt : 5,
+ rs : 5,
+ opcode : 6;
+ } operate_reg_format;
+
+ struct {
+ unsigned rd : 5,
+ function : 7,
+ one : 1,
+ literal : 8,
+ rs : 5,
+ opcode : 6;
+ } operate_lit_format;
+
+
+ /*
+	 * Floating point operate instructions are quite
+ * uniform in the encoding. As for the semantics..
+ */
+ struct {
+ unsigned fd : 5,
+ function : 11,
+ ft : 5,
+ fs : 5,
+ opcode : 6;
+ } float_format;
+
+
+ /*
+ * PAL instructions just define the major opcode
+ */
+
+ struct {
+ unsigned function : 26,
+ opcode : 6;
+ } pal_format;
+
+} alpha_instruction;
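+/*
+ * Illustrative use (a sketch, not part of the original header):
+ * computing a conditional-branch target the way a debugger might,
+ * given the instruction word fetched from an `alpha_instruction *pc':
+ *
+ *	alpha_instruction ins;
+ *	ins.bits = *(unsigned int *)pc;
+ *	if (ins.branch_format.opcode == op_beq)
+ *		target = (vm_offset_t)(pc + 1) +
+ *		    ((long)ins.branch_format.displacement << 2);
+ */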
+
+#endif !defined(ASSEMBLER)
+
+/*
+ *
+ * Encoding of regular instructions (Appendix C, op. cit.)
+ *
+ */
+
+ /* OPCODE, bits 26..31 */
+
+#define op_pal 0x00 /* see PAL sub-table */
+ /* 1..7 reserved */
+#define op_lda 0x08
+#define op_ldah 0x09
+ /* reserved */
+#define op_ldq_u 0x0b
+ /* c..e reserved */
+#define op_stq_u 0x0f
+
+#define op_arit 0x10 /* see ARIT sub-table */
+#define op_logical 0x11 /* see LOGICAL sub-table */
+#define op_bit 0x12 /* see BIT sub-table */
+#define op_mul 0x13 /* see MUL sub-table */
+ /* reserved */
+#define op_vax_float 0x15 /* see FLOAT sub-table */
+#define op_ieee_float 0x16 /* see FLOAT sub-table */
+#define op_any_float 0x17 /* see FLOAT sub-table */
+
+#define op_special 0x18 /* see SPECIAL sub-table */
+#define op_pal19 0x19 /* reserved for pal code */
+#define op_j 0x1a /* see JUMP sub-table */
+#define op_pal1b 0x1b /* reserved for pal code */
+ /* reserved */
+#define op_pal1d 0x1d /* reserved for pal code */
+#define op_pal1e 0x1e /* reserved for pal code */
+#define op_pal1f 0x1f /* reserved for pal code */
+
+#define op_ldf 0x20
+#define op_ldg 0x21
+#define op_lds 0x22
+#define op_ldt 0x23
+#define op_stf 0x24
+#define op_stg 0x25
+#define op_sts 0x26
+#define op_stt 0x27
+#define op_ldl 0x28
+#define op_ldq 0x29
+#define op_ldl_l 0x2a
+#define op_ldq_l 0x2b
+#define op_stl 0x2c
+#define op_stq 0x2d
+#define op_stl_c 0x2e
+#define op_stq_c 0x2f
+#define op_br 0x30
+#define op_fbeq 0x31
+#define op_fblt 0x32
+#define op_fble 0x33
+#define op_bsr 0x34
+#define op_fbne 0x35
+#define op_fbge 0x36
+#define op_fbgt 0x37
+#define op_blbc 0x38
+#define op_beq 0x39
+#define op_blt 0x3a
+#define op_ble 0x3b
+#define op_blbs 0x3c
+#define op_bne 0x3d
+#define op_bge 0x3e
+#define op_bgt 0x3f
+
+
+ /* PAL, "function" opcodes (bits 0..25) */
+/*
+ * What we will implement is TBD. These are the unprivileged ones
+ * that we probably have to support for compat reasons.
+ */
+
+#define op_bpt 0x0080
+#define op_chmk 0x0083
+#define op_imb 0x0086
+#define op_rei 0x0092
+
+/*
+ * Privileged ones
+ */
+
+#define op_halt 0x0000
+#define op_ldqp 0x0003
+#define op_stqp 0x0004
+#define op_swpctxt 0x0005
+/*#define op_swppal 0x000a */
+#define op_mtpr_fen 0x000c
+#define op_mtpr_ipir 0x000d
+#define op_mfpr_ipl 0x000e
+#define op_mtpr_ipl 0x000f
+#define op_mfpr_mces 0x0010
+#define op_mtpr_mces 0x0011
+#define op_mfpr_prbr 0x0013
+#define op_mtpr_prbr 0x0014
+#define op_mfpr_ptbr 0x0015
+#define op_mtpr_scbb 0x0017
+#define op_mtpr_sirr 0x0018
+#define op_mtpr_tbia 0x001b
+#define op_mtpr_tbiap 0x001c
+#define op_mtpr_tbis 0x001d
+#define op_mfpr_usp 0x0022
+#define op_mtpr_usp 0x0023
+#define op_mfpr_whami 0x003f
+
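+/*
+ * Illustrative sketch, not part of the MK83a source: a PAL call is
+ * recognized by the major opcode alone; the 26-bit function field
+ * then selects among the codes above, e.g. a breakpoint.
+ */
+#if 0	/* example only */
+static int
+is_breakpoint(alpha_instruction i)
+{
+	return (i.pal_format.opcode == op_pal) &&
+	       (i.pal_format.function == op_bpt);
+}
+#endif
+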
+
+ /* ARIT, "function" opcodes (bits 5..11) */
+
+#define op_addl 0x00
+#define op_s4addl 0x02
+#define op_subl 0x09
+#define op_s4subl 0x0b
+#define op_cmpbge 0x0f
+#define op_s8addl 0x12
+#define op_s8subl 0x1b
+#define op_cmpult 0x1d
+#define op_addq 0x20
+#define op_s4addq 0x22
+#define op_subq 0x29
+#define op_s4subq 0x2b
+#define op_cmpeq 0x2d
+#define op_s8addq 0x32
+#define op_s8subq 0x3b
+#define op_cmpule 0x3d
+#define op_addl_v 0x40
+#define op_subl_v 0x49
+#define op_cmplt 0x4d
+#define op_addq_v 0x60
+#define op_subq_v 0x69
+#define op_cmple 0x6d
+
+
+ /* LOGICAL, "function" opcodes (bits 5..11) */
+
+#define op_and 0x00
+#define op_andnot 0x08 /* bic */
+#define op_cmovlbs 0x14
+#define op_cmovlbc 0x16
+#define op_or 0x20 /* bis */
+#define op_cmoveq 0x24
+#define op_cmovne 0x26
+#define op_ornot 0x28
+#define op_xor 0x40
+#define op_cmovlt 0x44
+#define op_cmovge 0x46
+#define op_xornot 0x48 /* eqv */
+#define op_cmovle 0x64
+#define op_cmovgt 0x66
+
+ /* BIT, "function" opcodes (bits 5..11) */
+
+#define op_mskbl 0x02
+#define op_extbl 0x06
+#define op_insbl 0x0b
+#define op_mskwl 0x12
+#define op_extwl 0x16
+#define op_inswl 0x1b
+#define op_mskll 0x22
+#define op_extll 0x26
+#define op_insll 0x2b
+#define op_zap 0x30
+#define op_zapnot 0x31
+#define op_mskql 0x32
+#define op_srl 0x34
+#define op_extql 0x36
+#define op_sll 0x39
+#define op_insql 0x3b
+#define op_sra 0x3c
+#define op_mskwh 0x52
+#define op_inswh 0x57
+#define op_extwh 0x5a
+#define op_msklh 0x62
+#define op_inslh 0x67
+#define op_extlh 0x6a
+#define op_extqh 0x7a
+#define op_insqh 0x77
+#define op_mskqh 0x72
+
+ /* MUL, "function" opcodes (bits 5..11) */
+
+#define op_mull 0x00
+#define op_mulq_v 0x60
+#define op_mull_v 0x40
+#define op_umulh 0x30
+#define op_mulq 0x20
+
+
+ /* SPECIAL, "displacement" opcodes (bits 0..15) */
+
+#define op_draint 0x0000
+#define op_mb 0x4000
+#define op_fetch 0x8000
+#define op_fetch_m 0xa000
+#define op_rpcc 0xc000
+#define op_rc 0xe000
+#define op_rs 0xf000
+
+ /* JUMP, "action" opcodes (bits 14..15) */
+
+#define op_jmp 0x0
+#define op_jsr 0x1
+#define op_ret 0x2
+#define op_jcr 0x3
+
+
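+/*
+ * Illustrative sketch, not part of the MK83a source: bit 12 (the
+ * "one" field) selects between the literal and the register form
+ * of an operate instruction, as noted above.
+ */
+#if 0	/* example only */
+static unsigned long
+second_operand(alpha_instruction i, unsigned long r[32])
+{
+	if (i.operate_lit_format.one)
+		return (unsigned long) i.operate_lit_format.literal;
+	return r[i.operate_reg_format.rt];
+}
+#endif
+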
+/*
+ *
+ * Encoding of floating point instructions (pages C-5..6, op. cit.)
+ *
+ * Load and store operations use opcodes op_ldf..op_stt
+ */
+
+ /* any FLOAT, "function" opcodes (bits 5..11) */
+
+#define op_cvtlq 0x010
+#define op_cpys 0x020
+#define op_cpysn 0x021
+#define op_cpyse 0x022
+#define op_mt_fpcr 0x024
+#define op_mf_fpcr 0x025
+#define op_fcmoveq 0x02a
+#define op_fcmovne 0x02b
+#define op_fcmovlt 0x02c
+#define op_fcmovge 0x02d
+#define op_fcmovle 0x02e
+#define op_fcmovgt 0x02f
+#define op_cvtql 0x030
+#define op_cvtql_v 0x130
+#define op_cvtql_sv 0x330
+
+
+ /* ieee FLOAT, "function" opcodes (bits 5..11) */
+
+#define op_adds_c 0x000
+#define op_subs_c 0x001
+#define op_muls_c 0x002
+#define op_divs_c 0x003
+#define op_addt_c 0x020
+#define op_subt_c 0x021
+#define op_mult_c 0x022
+#define op_divt_c 0x023
+#define op_cvtts_c 0x02c
+#define op_cvttq_c 0x02f
+#define op_cvtqs_c 0x03c
+#define op_cvtqt_c 0x03e
+#define op_adds_m 0x040
+#define op_subs_m 0x041
+#define op_muls_m 0x042
+#define op_divs_m 0x043
+#define op_addt_m 0x060
+#define op_subt_m 0x061
+#define op_mult_m 0x062
+#define op_divt_m 0x063
+#define op_cvtts_m 0x06c
+#define op_cvtqs_m 0x07c
+#define op_cvtqt_m 0x07e
+#define op_adds 0x080
+#define op_subs 0x081
+#define op_muls 0x082
+#define op_divs 0x083
+#define op_addt 0x0a0
+#define op_subt 0x0a1
+#define op_mult 0x0a2
+#define op_divt 0x0a3
+#define op_cmptun 0x0a4
+#define op_cmpteq 0x0a5
+#define op_cmptlt 0x0a6
+#define op_cmptle 0x0a7
+#define op_cvtts 0x0ac
+#define op_cvttq 0x0af
+#define op_cvtqs 0x0bc
+#define op_cvtqt 0x0be
+#define op_adds_d 0x0c0
+#define op_subs_d 0x0c1
+#define op_muls_d 0x0c2
+#define op_divs_d 0x0c3
+#define op_addt_d 0x0e0
+#define op_subt_d 0x0e1
+#define op_mult_d 0x0e2
+#define op_divt_d 0x0e3
+#define op_cvtts_d 0x0ec
+#define op_cvtqs_d 0x0fc
+#define op_cvtqt_d 0x0fe
+#define op_adds_uc 0x100
+#define op_subs_uc 0x101
+#define op_muls_uc 0x102
+#define op_divs_uc 0x103
+#define op_addt_uc 0x120
+#define op_subt_uc 0x121
+#define op_mult_uc 0x122
+#define op_divt_uc 0x123
+#define op_cvtts_uc 0x12c
+#define op_cvttq_vc 0x12f
+#define op_adds_um 0x140
+#define op_subs_um 0x141
+#define op_muls_um 0x142
+#define op_divs_um 0x143
+#define op_addt_um 0x160
+#define op_subt_um 0x161
+#define op_mult_um 0x162
+#define op_divt_um 0x163
+#define op_cvtts_um 0x16c
+#define op_adds_u 0x180
+#define op_subs_u 0x181
+#define op_muls_u 0x182
+#define op_divs_u 0x183
+#define op_addt_u 0x1a0
+#define op_subt_u 0x1a1
+#define op_mult_u 0x1a2
+#define op_divt_u 0x1a3
+#define op_cvtts_u 0x1ac
+#define op_cvttq_v 0x1af
+#define op_adds_ud 0x1c0
+#define op_subs_ud 0x1c1
+#define op_muls_ud 0x1c2
+#define op_divs_ud 0x1c3
+#define op_addt_ud 0x1e0
+#define op_subt_ud 0x1e1
+#define op_mult_ud 0x1e2
+#define op_divt_ud 0x1e3
+#define op_cvtts_ud 0x1ec
+#define op_adds_suc 0x500
+#define op_subs_suc 0x501
+#define op_muls_suc 0x502
+#define op_divs_suc 0x503
+#define op_addt_suc 0x520
+#define op_subt_suc 0x521
+#define op_mult_suc 0x522
+#define op_divt_suc 0x523
+#define op_cvtts_suc 0x52c
+#define op_cvttq_svc 0x52f
+#define op_adds_sum 0x540
+#define op_subs_sum 0x541
+#define op_muls_sum 0x542
+#define op_divs_sum 0x543
+#define op_addt_sum 0x560
+#define op_subt_sum 0x561
+#define op_mult_sum 0x562
+#define op_divt_sum 0x563
+#define op_cvtts_sum 0x56c
+#define op_adds_su 0x580
+#define op_subs_su 0x581
+#define op_muls_su 0x582
+#define op_divs_su 0x583
+#define op_addt_su 0x5a0
+#define op_subt_su 0x5a1
+#define op_mult_su 0x5a2
+#define op_divt_su 0x5a3
+#define op_cmptun_su 0x5a4
+#define op_cmpteq_su 0x5a5
+#define op_cmptlt_su 0x5a6
+#define op_cmptle_su 0x5a7
+#define op_cvtts_su 0x5ac
+#define op_cvttq_sv 0x5af
+#define op_adds_sud 0x5c0
+#define op_subs_sud 0x5c1
+#define op_muls_sud 0x5c2
+#define op_divs_sud 0x5c3
+#define op_addt_sud 0x5e0
+#define op_subt_sud 0x5e1
+#define op_mult_sud 0x5e2
+#define op_divt_sud 0x5e3
+#define op_cvtts_sud 0x5ec
+#define op_adds_suic 0x700
+#define op_subs_suic 0x701
+#define op_muls_suic 0x702
+#define op_divs_suic 0x703
+#define op_addt_suic 0x720
+#define op_subt_suic 0x721
+#define op_mult_suic 0x722
+#define op_divt_suic 0x723
+#define op_cvtts_suic 0x72c
+#define op_cvttq_svic 0x72f
+#define op_cvtqs_suic 0x73c
+#define op_cvtqt_suic 0x73e
+#define op_adds_suim 0x740
+#define op_subs_suim 0x741
+#define op_muls_suim 0x742
+#define op_divs_suim 0x743
+#define op_addt_suim 0x760
+#define op_subt_suim 0x761
+#define op_mult_suim 0x762
+#define op_divt_suim 0x763
+#define op_cvtts_suim 0x76c
+#define op_cvtqs_suim 0x77c
+#define op_cvtqt_suim 0x77e
+#define op_adds_sui 0x780
+#define op_subs_sui 0x781
+#define op_muls_sui 0x782
+#define op_divs_sui 0x783
+#define op_addt_sui 0x7a0
+#define op_subt_sui 0x7a1
+#define op_mult_sui 0x7a2
+#define op_divt_sui 0x7a3
+#define op_cvtts_sui 0x7ac
+#define op_cvttq_svi 0x7af
+#define op_cvtqs_sui 0x7bc
+#define op_cvtqt_sui 0x7be
+#define op_adds_suid 0x7c0
+#define op_subs_suid 0x7c1
+#define op_muls_suid 0x7c2
+#define op_divs_suid 0x7c3
+#define op_addt_suid 0x7e0
+#define op_subt_suid 0x7e1
+#define op_mult_suid 0x7e2
+#define op_divt_suid 0x7e3
+#define op_cvtts_suid 0x7ec
+#define op_cvtqs_suid 0x7fc
+#define op_cvtqt_suid 0x7fe
+
+
+ /* vax FLOAT, "function" opcodes (bits 5..11) */
+
+#define op_addf_c 0x000
+#define op_subf_c 0x001
+#define op_mulf_c 0x002
+#define op_divf_c 0x003
+#define op_cvtdg_c 0x01e
+#define op_addg_c 0x020
+#define op_subg_c 0x021
+#define op_mulg_c 0x022
+#define op_divg_c 0x023
+#define op_cvtgf_c 0x02c
+#define op_cvtgd_c 0x02d
+#define op_cvtgqg_c 0x02f
+#define op_cvtqf_c 0x03c
+#define op_cvtqg_c 0x03e
+#define op_addf 0x080
+#define op_subf 0x081
+#define op_mulf 0x082
+#define op_divf 0x083
+#define op_cvtdg 0x09e
+#define op_addg 0x0a0
+#define op_subg 0x0a1
+#define op_mulg 0x0a2
+#define op_divg 0x0a3
+#define op_cmpgeq 0x0a5
+#define op_cmpglt 0x0a6
+#define op_cmpgle 0x0a7
+#define op_cvtgf 0x0ac
+#define op_cvtgd 0x0ad
+#define op_cvtgq 0x0af
+#define op_cvtqf 0x0bc
+#define op_cvtqg 0x0be
+#define op_addf_uc 0x100
+#define op_subf_uc 0x101
+#define op_mulf_uc 0x102
+#define op_divf_uc 0x103
+#define op_cvtdg_uc 0x11e
+#define op_addg_uc 0x120
+#define op_subg_uc 0x121
+#define op_mulg_uc 0x122
+#define op_divg_uc 0x123
+#define op_cvtgf_uc 0x12c
+#define op_cvtgd_uc 0x12d
+#define op_cvtgqg_vc 0x12f
+#define op_addf_u 0x180
+#define op_subf_u 0x181
+#define op_mulf_u 0x182
+#define op_divf_u 0x183
+#define op_cvtdg_u 0x19e
+#define op_addg_u 0x1a0
+#define op_subg_u 0x1a1
+#define op_mulg_u 0x1a2
+#define op_divg_u 0x1a3
+#define op_cvtgf_u 0x1ac
+#define op_cvtgd_u 0x1ad
+#define op_cvtgqg_v 0x1af
+#define op_addf_sc 0x400
+#define op_subf_sc 0x401
+#define op_mulf_sc 0x402
+#define op_divf_sc 0x403
+#define op_cvtdg_sc 0x41e
+#define op_addg_sc 0x420
+#define op_subg_sc 0x421
+#define op_mulg_sc 0x422
+#define op_divg_sc 0x423
+#define op_cvtgf_sc 0x42c
+#define op_cvtgd_sc 0x42d
+#define op_cvtgqg_sc 0x42f
+#define op_cvtqf_sc 0x43c
+#define op_cvtqg_sc 0x43e
+#define op_addf_s 0x480
+#define op_subf_s 0x481
+#define op_mulf_s 0x482
+#define op_divf_s 0x483
+#define op_cvtdg_s 0x49e
+#define op_addg_s 0x4a0
+#define op_subg_s 0x4a1
+#define op_mulg_s 0x4a2
+#define op_divg_s 0x4a3
+#define op_cmpgeq_s 0x4a5
+#define op_cmpglt_s 0x4a6
+#define op_cmpgle_s 0x4a7
+#define op_cvtgf_s 0x4ac
+#define op_cvtgd_s 0x4ad
+#define op_cvtgqg_s 0x4af
+#define op_cvtqf_s 0x4bc
+#define op_cvtqg_s 0x4be
+#define op_addf_suc 0x500
+#define op_subf_suc 0x501
+#define op_mulf_suc 0x502
+#define op_divf_suc 0x503
+#define op_cvtdg_suc 0x51e
+#define op_addg_suc 0x520
+#define op_subg_suc 0x521
+#define op_mulg_suc 0x522
+#define op_divg_suc 0x523
+#define op_cvtgf_suc 0x52c
+#define op_cvtgd_suc 0x52d
+#define op_cvtgqg_svc 0x52f
+#define op_addf_su 0x580
+#define op_subf_su 0x581
+#define op_mulf_su 0x582
+#define op_divf_su 0x583
+#define op_cvtdg_su 0x59e
+#define op_addg_su 0x5a0
+#define op_subg_su 0x5a1
+#define op_mulg_su 0x5a2
+#define op_divg_su 0x5a3
+#define op_cvtgf_su 0x5ac
+#define op_cvtgd_su 0x5ad
+#define op_cvtgqg_sv 0x5af
+
+
+#endif /* _ALPHA_INSTRUCTION_H_ */
diff --git a/alpha/include/mach/alpha/asm.h b/alpha/include/mach/alpha/asm.h
new file mode 100644
index 00000000..f69a7efa
--- /dev/null
+++ b/alpha/include/mach/alpha/asm.h
@@ -0,0 +1,539 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: asm.h,v $
+ * Revision 2.4 93/05/20 21:00:24 mrt
+ * Changed uses of the zero register to ra in calls to .frame.
+ * [93/05/18 mrt]
+ *
+ * Revision 2.3 93/03/09 10:55:53 danner
+ * For GAS, use a space in .ent directives.
+ * [93/03/05 af]
+ *
+ * Revision 2.2 93/01/14 17:40:43 danner
+ * Created.
+ * [91/12/11 af]
+ *
+ */
+/*
+ * File: asm.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 12/91
+ *
+ * Assembly coding style
+ *
+ * This file contains macros and register defines to
+ * aid in writing more readable assembly code.
+ * Some rules to make assembly code understandable by
+ * a debugger are also noted.
+ *
+ * The document
+ *
+ * "ALPHA Calling Standard", DEC 27-Apr-90
+ *
+ * defines (a superset of) the rules and conventions
+ * we use. While we make no promise of adhering to
+ * such standard and its evolution (esp where we
+ * can get faster code paths) it is certainly intended
+ * that we be interoperable with such standard.
+ *
+ * In this sense, this file is a proper part of the
+ * definition of the (software) Alpha architecture.
+ */
+
+/*
+ * Symbolic register names and register saving rules
+ *
+ * Legend:
+ * T Saved by caller (Temporaries)
+ * S Saved by callee (call-Safe registers)
+ */
+
+#define v0 $0 /* (T) return value */
+#define t0 $1 /* (T) temporary registers */
+#define t1 $2
+#define t2 $3
+#define t3 $4
+#define t4 $5
+#define t5 $6
+#define t6 $7
+#define t7 $8
+
+#define s0 $9 /* (S) call-safe registers */
+#define s1 $10
+#define s2 $11
+#define s3 $12
+#define s4 $13
+#define s5 $14
+#define s6 $15
+#define a0 $16 /* (T) argument registers */
+#define a1 $17
+#define a2 $18
+#define a3 $19
+#define a4 $20
+#define a5 $21
+#define t8 $22 /* (T) temporary registers */
+#define t9 $23
+#define t10 $24
+#define t11 $25
+#define ra $26 /* (T) return address */
+#define t12 $27 /* (T) another temporary */
+#define at $28 /* (T) assembler scratch */
+#define gp $29 /* (T) (local) data pointer */
+#define sp $30 /* (S) stack pointer */
+#define zero $31 /* wired zero */
+
+/* Floating point registers (XXXX VERIFY THIS) */
+#define fv0 $f0 /* (T) return value (real) */
+#define fv1 $f1 /* (T) return value (imaginary)*/
+#define ft0 fv1
+#define fs0 $f2 /* (S) call-safe registers */
+#define fs1 $f3
+#define fs2 $f4
+#define fs3 $f5
+#define fs4 $f6
+#define fs5 $f7
+#define fs6 $f8
+#define fs7 $f9
+#define ft1 $f10 /* (T) temporary registers */
+#define ft2 $f11
+#define ft3 $f12
+#define ft4 $f13
+#define ft5 $f14
+#define ft6 $f15
+#define fa0 $f16 /* (T) argument registers */
+#define fa1 $f17
+#define fa2 $f18
+#define fa3 $f19
+#define fa4 $f20
+#define fa5 $f21
+#define ft7 $f22 /* (T) more temporaries */
+#define ft8 $f23
+#define ft9 $f24
+#define ft10 $f25
+#define ft11 $f26
+#define ft12 $f27
+#define ft13 $f28
+#define ft14 $f29
+#define ft15 $f30
+#define fzero $f31 /* wired zero */
+
+
+/* Other DEC standard names */
+#define ai $25 /* (T) argument information */
+#define pv $27 /* (T) procedure value */
+
+/*
+ *
+ * Debuggers need symbol table information to be able to properly
+ * decode a stack trace. The minimum that should be provided is:
+ *
+ * name:
+ * .proc name,numargs
+ *
+ * where "name" is the function's name;
+ * "numargs" how many arguments it expects. For varargs
+ * procedures this should be a negative number,
+ * indicating the minimum required number of
+ * arguments (which is at least 1);
+ *
+ * NESTED functions (functions that call other functions) should define
+ * how they handle their stack frame in a .frame directive:
+ *
+ * .frame framesize, pc_reg, i_mask, f_mask
+ *
+ * where "framesize" is the size of the frame for this function, in bytes.
+ * That is:
+ * new_sp + framesize == old_sp
+ * Framesizes should be rounded to a cacheline size.
+ * Note that old_sp plays the role of a conventional
+ * "frame pointer";
+ * "pc_reg" is either a register which preserves the caller's PC
+ * or 'std'; if std, the saved PC should be stored at
+ * old_sp-8
+ * "i_mask" is a bitmask that indicates which of the integer
+ * registers are saved. See the M_xx defines at the
+ * end for the encoding of this 32bit value.
+ * "f_mask" is the same, for floating point registers.
+ *
+ * Note that registers should be saved starting at "old_sp-8", where the
+ * return address should be stored. Other registers follow at -16-24-32..
+ * starting from register 0 (if saved) and up. Then float registers (if any)
+ * are saved.
+ *
+ * If you need to alias a leaf function, or to provide multiple entry points
+ * use the LEAF() macro for the main entry point and XLEAF() for the other
+ * additional/alternate entry points.
+ * "XLEAF"s must be nested within a "LEAF" and a ".end".
+ * Similar rules for nested routines, e.g. use NESTED/XNESTED
+ * Symbols that should not be exported can be declared with the STATIC_xxx
+ * macros.
+ *
+ * All functions must be terminated by the END macro
+ *
+ * It is conceivable, although currently at the limits of compiler
+ * technology, that while performing inter-procedural optimizations
+ * the compiler/linker might be able to avoid unnecessary register spills
+ * if told about the register usage of LEAF procedures (and by transitive
+ * closure of NESTED procedures as well). Assembly code can help
+ * this process using the .reguse directive:
+ *
+ * .reguse i_mask, f_mask
+ *
+ * where the register masks are built as above or-ing M_xx defines.
+ *
+ *
+ * All symbols are internal unless EXPORTed. Symbols that are IMPORTed
+ * must be appropriately described to the debugger.
+ *
+ */
+
+/*
+ * LEAF
+ * Declare a global leaf function.
+ * A leaf function does not call other functions AND does not
+ * use any register that is callee-saved AND does not modify
+ * the stack pointer.
+ */
+#define LEAF(_name_,_n_args_) \
+ .globl _name_; \
+ .ent _name_ 0; \
+_name_:; \
+ .frame sp,0,ra
+/* should have been
+ .proc _name_,_n_args_; \
+ .frame 0,ra,0,0
+*/
+
+/*
+ * STATIC_LEAF
+ * Declare a local leaf function.
+ */
+#define STATIC_LEAF(_name_,_n_args_) \
+ .ent _name_ 0; \
+_name_:; \
+ .frame sp,0,ra
+/* should have been
+ .proc _name_,_n_args_; \
+ .frame 0,ra,0,0
+*/
+/*
+ * XLEAF
+ * Global alias for a leaf function, or alternate entry point
+ */
+#define XLEAF(_name_,_n_args_) \
+ .globl _name_; \
+ .aent _name_ 0; \
+_name_:
+/* should have been
+ .aproc _name_,_n_args_;
+*/
+
+/*
+ * STATIC_XLEAF
+ * Local alias for a leaf function, or alternate entry point
+ */
+#define STATIC_XLEAF(_name_,_n_args_) \
+ .aent _name_ 0; \
+_name_:
+/* should have been
+ .aproc _name_,_n_args_;
+*/
+
+/*
+ * NESTED
+ * Declare a (global) nested function
+ * A nested function calls other functions and needs
+ * therefore stack space to save/restore registers.
+ */
+#define NESTED(_name_, _n_args_, _framesize_, _pc_reg_, _i_mask_, _f_mask_ ) \
+ .globl _name_; \
+ .ent _name_ 0; \
+_name_:; \
+ .frame sp,_framesize_,_pc_reg_; \
+ .livereg _i_mask_,_f_mask_
+/* should have been
+ .proc _name_,_n_args_; \
+ .frame _framesize_, _pc_reg_, _i_mask_, _f_mask_
+*/
+
+/*
+ * STATIC_NESTED
+ * Declare a local nested function.
+ */
+#define STATIC_NESTED(_name_, _n_args_, _framesize_, _pc_reg_, _i_mask_, _f_mask_ ) \
+ .ent _name_ 0; \
+_name_:; \
+ .frame sp,_framesize_,_pc_reg_; \
+ .livereg _i_mask_,_f_mask_
+/* should have been
+ .proc _name_,_n_args_; \
+ .frame _framesize_, _pc_reg_, _i_mask_, _f_mask_
+*/
+
+/*
+ * XNESTED
+ * Same as XLEAF, for a nested function.
+ */
+#define XNESTED(_name_,_n_args_) \
+ .globl _name_; \
+ .aent _name_ 0; \
+_name_:
+/* should have been
+ .aproc _name_,_n_args_;
+*/
+
+
+/*
+ * STATIC_XNESTED
+ * Same as STATIC_XLEAF, for a nested function.
+ */
+#define STATIC_XNESTED(_name_,_n_args_) \
+ .aent _name_ 0; \
+_name_:
+/* should have been
+ .aproc _name_,_n_args_;
+*/
+
+
+/*
+ * END
+ * Function delimiter
+ */
+#define END(_name_) \
+ .end _name_
+
+
+/*
+ * CALL
+ * Function invocation
+ */
+#define CALL(_name_) \
+ jsr ra,_name_; \
+ ldgp gp,0(ra)
+/* but this would cover longer jumps
+ br ra,.+4; \
+ bsr ra,_name_
+*/
+
+
+/*
+ * RET
+ * Return from function
+ */
+#define RET \
+ ret zero,(ra),1
+
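+/*
+ * Illustrative sketch, not part of the MK83a source: a typical leaf
+ * routine, long double_it(long x), written with the macros above.
+ */
+#if 0	/* example only */
+LEAF(double_it,1)
+	addq	a0,a0,v0	/* v0 = 2 * x */
+	RET
+	END(double_it)
+#endif
+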
+
+/*
+ * EXPORT
+ * Export a symbol
+ */
+#define EXPORT(_name_) \
+ .globl _name_; \
+_name_:
+
+
+/*
+ * IMPORT
+ * Make an external name visible, typecheck the size
+ */
+#define IMPORT(_name_, _size_) \
+ .extern _name_,_size_
+
+
+/*
+ * ABS
+ * Define an absolute symbol
+ */
+#define ABS(_name_, _value_) \
+ .globl _name_; \
+_name_ = _value_
+
+
+/*
+ * BSS
+ * Allocate un-initialized space for a global symbol
+ */
+#define BSS(_name_,_numbytes_) \
+ .comm _name_,_numbytes_
+
+/*
+ * VECTOR
+ * Make an exception entry point look like a called function,
+ * to make it digestible to the debugger (KERNEL only)
+ */
+#define VECTOR(_name_, _i_mask_) \
+ .globl _name_; \
+ .ent _name_ 0; \
+_name_:; \
+ .mask _i_mask_|IM_EXC,0; \
+ .frame sp,MSS_SIZE,ra;
+/* .livereg _i_mask_|IM_EXC,0 */
+/* should have been
+ .proc _name_,1; \
+ .frame MSS_SIZE,$31,_i_mask_,0; \
+*/
+
+/*
+ * MSG
+ * Allocate space for a message (a read-only ascii string)
+ */
+#define MSG(msg,reg) \
+ br reg,9f; \
+ .asciiz msg; \
+ .align 4; \
+9:
+
+/*
+ * PRINTF
+ * Print a message
+ */
+#define PRINTF(msg) \
+ MSG(msg,a0); \
+ CALL(printf)
+
+/*
+ * PANIC
+ * Fatal error (KERNEL)
+ */
+#define PANIC(msg) \
+ MSG(msg,a0); \
+ CALL(panic)
+
+/*
+ * UNIX AND UNIXOID TRAPS
+ * these do not belong here, really
+ */
+#ifdef __STDC__
+#define SYSCALL(_name_,_n_args_) \
+LEAF(_name_,_n_args_); \
+ ldiq v0,SYS_ ## _name_; \
+ call_pal 0x83; \
+ beq a3,9f; \
+ br zero,_cerror; \
+9:
+#else /* __STDC__ */
+#define SYSCALL(_name_,_n_args_) \
+LEAF(_name_,_n_args_); \
+ ldiq v0,SYS_/**/_name_; \
+ call_pal 0x83; \
+ beq a3,9f; \
+ br zero,_cerror; \
+9:
+#endif /* __STDC__ */
+
+#ifdef __STDC__
+#define PSEUDO(_name_,_true_name_,_n_args_) \
+LEAF(_name_,_n_args_); \
+ ldiq v0,SYS_ ## _true_name_; \
+ call_pal 0x83;
+#else /* __STDC__ */
+#define PSEUDO(_name_,_true_name_,_n_args_) \
+LEAF(_name_,_n_args_); \
+ ldiq v0,SYS_/**/_true_name_; \
+ call_pal 0x83;
+#endif /* __STDC__ */
+
+
+/*
+ * Register mask defines, used to define both save
+ * and use register sets.
+ *
+ * NOTE: The bit order should HAVE BEEN maintained when saving
+ * registers on the stack: sp goes at the highest
+ * address, gp lower on the stack, etc.
+ * BUT NO ONE CARES ABOUT DEBUGGERS AT MIPS
+ */
+
+#define IM_EXC 0x80000000
+#define IM_SP 0x40000000
+#define IM_GP 0x20000000
+#define IM_AT 0x10000000
+#define IM_T12 0x08000000
+# define IM_PV IM_T12
+#define IM_RA 0x04000000
+#define IM_T11 0x02000000
+# define IM_AI IM_T11
+#define IM_T10 0x01000000
+#define IM_T9 0x00800000
+#define IM_T8 0x00400000
+#define IM_A5 0x00200000
+#define IM_A4 0x00100000
+#define IM_A3 0x00080000
+#define IM_A2 0x00040000
+#define IM_A1 0x00020000
+#define IM_A0 0x00010000
+#define IM_S6 0x00008000
+#define IM_S5 0x00004000
+#define IM_S4 0x00002000
+#define IM_S3 0x00001000
+#define IM_S2 0x00000800
+#define IM_S1 0x00000400
+#define IM_S0 0x00000200
+#define IM_T7 0x00000100
+#define IM_T6 0x00000080
+#define IM_T5 0x00000040
+#define IM_T4 0x00000020
+#define IM_T3 0x00000010
+#define IM_T2 0x00000008
+#define IM_T1 0x00000004
+#define IM_T0 0x00000002
+#define IM_V0 0x00000001
+
+#define FM_T15 0x40000000
+#define FM_T14 0x20000000
+#define FM_T13 0x10000000
+#define FM_T12 0x08000000
+#define FM_T11 0x04000000
+#define FM_T10 0x02000000
+#define FM_T9 0x01000000
+#define FM_T8 0x00800000
+#define FM_T7 0x00400000
+#define FM_A5 0x00200000
+#define FM_A4 0x00100000
+#define FM_A3 0x00080000
+#define FM_A2 0x00040000
+#define FM_A1 0x00020000
+#define FM_A0 0x00010000
+#define FM_T6 0x00008000
+#define FM_T5 0x00004000
+#define FM_T4 0x00002000
+#define FM_T3 0x00001000
+#define FM_T2 0x00000800
+#define FM_T1 0x00000400
+#define FM_S7 0x00000200
+#define FM_S6 0x00000100
+#define FM_S5 0x00000080
+#define FM_S4 0x00000040
+#define FM_S3 0x00000020
+#define FM_S2 0x00000010
+#define FM_S1 0x00000008
+#define FM_S0 0x00000004
+#define FM_T0 0x00000002
+#define FM_V1 FM_T0
+#define FM_V0 0x00000001
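+
+/*
+ * Illustrative sketch, not part of the MK83a source: the masks are
+ * or-ed together to describe register usage, e.g. a nested function
+ * with a 32-byte frame that saves ra, s0 and s1.
+ */
+#if 0	/* example only */
+NESTED(sample,2,32,ra,IM_RA|IM_S0|IM_S1,0)
+	...
+	END(sample)
+#endif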
diff --git a/alpha/include/mach/alpha/boolean.h b/alpha/include/mach/alpha/boolean.h
new file mode 100644
index 00000000..f9d92a6f
--- /dev/null
+++ b/alpha/include/mach/alpha/boolean.h
@@ -0,0 +1,50 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: boolean.h,v $
+ * Revision 2.2 93/01/14 17:40:47 danner
+ * Copied for alpha.
+ * [92/12/17 af]
+ *
+ */
+
+/*
+ * File: alpha/boolean.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 1/89
+ *
+ *
+ * Boolean type, for Alpha.
+ *
+ */
+
+#ifndef _MACH_ALPHA_BOOLEAN_H_
+#define _MACH_ALPHA_BOOLEAN_H_
+
+typedef int boolean_t;
+
+#endif /* _MACH_ALPHA_BOOLEAN_H_ */
diff --git a/alpha/include/mach/alpha/exception.h b/alpha/include/mach/alpha/exception.h
new file mode 100644
index 00000000..d53e7521
--- /dev/null
+++ b/alpha/include/mach/alpha/exception.h
@@ -0,0 +1,88 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: exception.h,v $
+ * Revision 2.2 93/01/14 17:40:51 danner
+ * Created.
+ * [91/12/29 af]
+ *
+ */
+/*
+ * File: alpha/exception.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 12/91
+ *
+ * Codes and subcodes for Alpha exceptions.
+ */
+
+#ifndef _MACH_ALPHA_EXCEPTION_H_
+#define _MACH_ALPHA_EXCEPTION_H_
+
+/*
+ * Hardware level exceptions
+ */
+
+
+/*
+ * Software exception codes
+ */
+
+
+/*
+ * Bad instruction subcodes
+ */
+
+#define EXC_ALPHA_PRIVINST 1
+#define EXC_ALPHA_RESOPND 2
+#define EXC_ALPHA_RESADDR 3
+
+/*
+ * EXC_ARITHMETIC subcodes
+ *
+ * NOTE: This is incompatible with OSF1's definitions.
+ * The reason is that more than one exception might
+ * be reported at once, so we want to OR the bits.
+ *
+ * The subcode argument is the "register write mask".
+ */
+#define EXC_ALPHA_FLT_COMPLETE 0x01
+#define EXC_ALPHA_FLT_INVALID 0x02
+#define EXC_ALPHA_FLT_DIVIDE0 0x04
+#define EXC_ALPHA_FLT_FOVERFLOW 0x08
+#define EXC_ALPHA_FLT_UNDERFLOW 0x10
+#define EXC_ALPHA_FLT_INEXACT 0x20
+#define EXC_ALPHA_FLT_IOVERFLOW 0x40
+
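+/*
+ * Illustrative sketch, not part of the MK83a source: since the bits
+ * above may be or-ed together, a handler should test each one.
+ */
+#if 0	/* example only */
+	if (subcode & EXC_ALPHA_FLT_DIVIDE0)
+		...	/* division by zero */
+	if (subcode & (EXC_ALPHA_FLT_FOVERFLOW|EXC_ALPHA_FLT_UNDERFLOW))
+		...	/* range error */
+#endif
+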
+/*
+ * EXC_BREAKPOINT subcodes
+ */
+
+#define EXC_BREAK_BPT 0
+#define EXC_BREAK_SSTEP 1
+
+
+#endif /* _MACH_ALPHA_EXCEPTION_H_ */
diff --git a/alpha/include/mach/alpha/kern_return.h b/alpha/include/mach/alpha/kern_return.h
new file mode 100644
index 00000000..0eb90a1f
--- /dev/null
+++ b/alpha/include/mach/alpha/kern_return.h
@@ -0,0 +1,57 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: kern_return.h,v $
+ * Revision 2.2 93/01/14 17:40:55 danner
+ * Created.
+ * [91/12/29 af]
+ *
+ */
+
+/*
+ * File: alpha/kern_return.h
+ * Author: Alessandro Forin
+ * Date: 12/91
+ *
+ * Machine-dependent kernel return definitions.
+ *
+ */
+
+#ifndef _MACH_ALPHA_KERN_RETURN_H_
+#define _MACH_ALPHA_KERN_RETURN_H_
+
+#ifndef ASSEMBLER
+/*
+ * Strictly speaking, this should be a long (64 bits),
+ * because that is the width of the return register.
+ * However, (a) it does not need to be and (b) it would
+ * hurt MiG badly to do so.
+ */
+typedef int kern_return_t;
+#endif /* ASSEMBLER */
+
+#endif /* _MACH_ALPHA_KERN_RETURN_H_ */
diff --git a/alpha/include/mach/alpha/machine_types.defs b/alpha/include/mach/alpha/machine_types.defs
new file mode 100644
index 00000000..7ba7723c
--- /dev/null
+++ b/alpha/include/mach/alpha/machine_types.defs
@@ -0,0 +1,78 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1992 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: machine_types.defs,v $
+ * Revision 2.2 93/01/14 17:41:00 danner
+ * Created.
+ * [92/08/04 af]
+ *
+ */
+/*
+ * File: alpha/machine_types.defs
+ * Author: Alessandro Forin
+ * Date: 7/92
+ *
+ * Header file for basic, machine-dependent data types. Alpha version.
+ *
+ */
+
+#ifndef _MACHINE_VM_TYPES_DEFS_
+#define _MACHINE_VM_TYPES_DEFS_ 1
+
+/*
+ * A natural_t is the type for the native
+ * integer type, e.g. 32 or 64 or.. whatever
+ * register size the machine has. Unsigned, it is
+ * used for entities that might be either
+ * unsigned integers or pointers, and for
+ * type-casting between the two.
+ * For instance, the IPC system represents
+ * a port in user space as an integer and
+ * in kernel space as a pointer.
+ */
+type natural_t = unsigned64;
+
+/*
+ * An integer_t is the signed counterpart
+ * of the natural_t type. Both types are
+ * only supposed to be used to define
+ * other types in a machine-independent
+ * way.
+ */
+type integer_t = int64;
+
+
+#if MACH_IPC_COMPAT
+/*
+ * For the old IPC interface
+ */
+#define MSG_TYPE_PORT_NAME MACH_MSG_TYPE_INTEGER_64
+
+#endif /* MACH_IPC_COMPAT */
+
+
+#endif /* _MACHINE_VM_TYPES_DEFS_ */
diff --git a/alpha/include/mach/alpha/syscall_sw.h b/alpha/include/mach/alpha/syscall_sw.h
new file mode 100644
index 00000000..a382d5e0
--- /dev/null
+++ b/alpha/include/mach/alpha/syscall_sw.h
@@ -0,0 +1,139 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: syscall_sw.h,v $
+ * Revision 2.3 93/03/09 10:55:56 danner
+ * Changed .ent stmts to use spaces.
+ * [93/02/15 af]
+ *
+ * Revision 2.2 93/01/14 17:41:03 danner
+ * Created.
+ * [91/12/29 af]
+ *
+ */
+
+/*
+ * File: alpha/syscall_sw.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 12/91
+ *
+ * Mach syscall trap argument passing convention, on Alpha
+ */
+
+#ifndef _MACH_ALPHA_SYSCALL_SW_H_
+#define _MACH_ALPHA_SYSCALL_SW_H_ 1
+
+#include <mach/alpha/asm.h>
+
+/*
+ * Unix kernels expect arguments to be passed with the normal C calling
+ * sequence (a0-a5+stack); v0 contains the system call number on entry,
+ * v0-at contain the results from the call, and a3 the success/fail status.
+ *
+ * On Mach we pass all the arguments in registers; the trap number is in v0
+ * and the return value is placed in v0. There are no awful hacks for
+ * returning multiple values from a trap.
+ *
+ * Performance: a trap with up to 6 args takes 3 cycles in user mode,
+ * with no memory accesses. Any arg after the sixth takes 1 more cycle
+ * to load from the cache (which cannot possibly miss) into a register.
+ */
+
+/*
+ * A simple trap is one with up to 6 args. Args are passed to us
+ * in registers, and we keep them there.
+ */
+#define simple_kernel_trap(trap_name, trap_number, nargs) \
+ .globl trap_name; \
+ .ent trap_name 0; \
+trap_name:; \
+ .frame sp,0,ra; \
+ lda v0,trap_number(zero); \
+ call_pal 0x83; \
+ RET; \
+ .end trap_name
+
+#define kernel_trap_0(trap_name,trap_number) \
+ simple_kernel_trap(trap_name,trap_number,0)
+#define kernel_trap_1(trap_name,trap_number) \
+ simple_kernel_trap(trap_name,trap_number,1)
+#define kernel_trap_2(trap_name,trap_number) \
+ simple_kernel_trap(trap_name,trap_number,2)
+#define kernel_trap_3(trap_name,trap_number) \
+ simple_kernel_trap(trap_name,trap_number,3)
+#define kernel_trap_4(trap_name,trap_number) \
+ simple_kernel_trap(trap_name,trap_number,4)
+#define kernel_trap_5(trap_name,trap_number) \
+ simple_kernel_trap(trap_name,trap_number,5)
+#define kernel_trap_6(trap_name,trap_number) \
+ simple_kernel_trap(trap_name,trap_number,6)
+
+/*
+ * A trap with more than 6 args requires popping of args
+ * off the stack, where they are placed by the compiler
+ * or by the user.
+ */
+#define kernel_trap_7(trap_name, trap_number) \
+ .globl trap_name; \
+ .ent trap_name 0; \
+trap_name:; \
+ .frame sp,0,ra; \
+ ldq t0,0(sp); \
+ lda v0,trap_number(zero); \
+ call_pal 0x83; \
+ RET; \
+ .end trap_name
+
+#define kernel_trap_11(trap_name, trap_number) \
+ .globl trap_name; \
+ .ent trap_name 0; \
+trap_name:; \
+ .frame sp,0,ra; \
+ ldq t0,0(sp); \
+ ldq t1,8(sp); \
+ ldq t2,16(sp); \
+ ldq t3,24(sp); \
+ ldq t4,32(sp); \
+ lda v0,trap_number(zero); \
+ call_pal 0x83; \
+ RET; \
+ .end trap_name
+
+/*
+ * There are no Mach traps with more than 11 args.
+ * If that changes, the kernel needs to be fixed also.
+ */
+
+#ifdef __STDC__
+#define kernel_trap(trap_name,trap_number,nargs) \
+ kernel_trap_ ## nargs(trap_name,trap_number)
+#else /* __STDC__ */
+#define kernel_trap(trap_name,trap_number,nargs) \
+ kernel_trap_/**/nargs(trap_name,trap_number)
+#endif
+
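+/*
+ * Illustrative sketch: this is how a machine-independent list such as
+ * mach/syscall_sw.h is expected to instantiate the macros above (the
+ * trap name and number here are only an example).
+ */
+#if 0	/* example only */
+kernel_trap(mach_msg_trap,-25,7)
+#endif
+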
+#endif /* _MACH_ALPHA_SYSCALL_SW_H_ */
diff --git a/alpha/include/mach/alpha/thread_status.h b/alpha/include/mach/alpha/thread_status.h
new file mode 100644
index 00000000..3060746b
--- /dev/null
+++ b/alpha/include/mach/alpha/thread_status.h
@@ -0,0 +1,156 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: thread_status.h,v $
+ * Revision 2.2 93/01/14 17:41:08 danner
+ * Revised for new calling sequence.
+ * [92/06/07 af]
+ *
+ * Created.
+ * [91/12/29 af]
+ *
+ */
+
+/*
+ * File: alpha/thread_status.h
+ * Author: Alessandro Forin, Carnegie Mellon University
+ * Date: 12/91
+ *
+ *
+ * This file contains the structure definitions for the thread
+ * state as applicable to Alpha processors.
+ *
+ */
+
+#ifndef _MACH_ALPHA_THREAD_STATE_
+#define _MACH_ALPHA_THREAD_STATE_
+
+#include <mach/machine/vm_types.h>
+
+/*
+ * The structures defined in here are exported to users for
+ * use in status/mutate calls.
+ *
+ * alpha_thread_state basic machine state
+ *
+ * alpha_float_state state of floating point coprocessor
+ *
+ * alpha_exc_state exception state (fault address, etc.)
+ */
+
+#define ALPHA_THREAD_STATE (1)
+#define ALPHA_FLOAT_STATE (2)
+#define ALPHA_EXC_STATE (3)
+
+struct alpha_thread_state {
+ integer_t r0; /* v0: return value */
+ integer_t r1; /* t0: caller saved 0 */
+ integer_t r2; /* t1: caller saved 1 */
+ integer_t r3; /* t2: caller saved 2 */
+ integer_t r4; /* t3: caller saved 3 */
+ integer_t r5; /* t4: caller saved 4 */
+ integer_t r6; /* t5: caller saved 5 */
+ integer_t r7; /* t6: caller saved 6 */
+ integer_t r8; /* t7: caller saved 7 */
+ integer_t r9; /* s0: callee saved 0 */
+ integer_t r10; /* s1: callee saved 1 */
+ integer_t r11; /* s2: callee saved 2 */
+ integer_t r12; /* s3: callee saved 3 */
+ integer_t r13; /* s4: callee saved 4 */
+ integer_t r14; /* s5: callee saved 5 */
+ integer_t r15; /* s6: callee saved 6 */
+ integer_t r16; /* a0: argument 0 */
+ integer_t r17; /* a1: argument 1 */
+ integer_t r18; /* a2: argument 2 */
+ integer_t r19; /* a3: argument 3 */
+ integer_t r20; /* a4: argument 4 */
+ integer_t r21; /* a5: argument 5 */
+ integer_t r22; /* t8: caller saved 8 */
+ integer_t r23; /* t9: caller saved 9 */
+ integer_t r24; /* t10: caller saved 10 */
+ integer_t r25; /* t11: caller saved 11 */
+ integer_t r26; /* ra: return address */
+ integer_t r27; /* pv: procedure value (caller saved) */
+ integer_t r28; /* at: assembler temporary */
+ integer_t r29; /* gp: procedure's data pointer */
+ integer_t r30; /* sp: stack pointer */
+/* integer_t r31; wired zero, not returned */
+ integer_t pc; /* user-mode PC */
+};
+
+#define ALPHA_THREAD_STATE_COUNT (sizeof(struct alpha_thread_state)/sizeof(natural_t))
+
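+/*
+ * Illustrative sketch, not part of the MK83a source: a user program
+ * reading a thread's PC (assumes the standard Mach thread_get_state
+ * interface).
+ */
+#if 0	/* example only */
+	struct alpha_thread_state ts;
+	unsigned int count = ALPHA_THREAD_STATE_COUNT;
+
+	if (thread_get_state(thread, ALPHA_THREAD_STATE,
+			     (thread_state_t) &ts, &count) == KERN_SUCCESS)
+		printf("pc = 0x%lx\n", (unsigned long) ts.pc);
+#endif
+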
+
+struct alpha_float_state {
+ integer_t r0; /* 31 general registers + status */
+ integer_t r1;
+ integer_t r2;
+ integer_t r3;
+ integer_t r4;
+ integer_t r5;
+ integer_t r6;
+ integer_t r7;
+ integer_t r8;
+ integer_t r9;
+ integer_t r10;
+ integer_t r11;
+ integer_t r12;
+ integer_t r13;
+ integer_t r14;
+ integer_t r15;
+ integer_t r16;
+ integer_t r17;
+ integer_t r18;
+ integer_t r19;
+ integer_t r20;
+ integer_t r21;
+ integer_t r22;
+ integer_t r23;
+ integer_t r24;
+ integer_t r25;
+ integer_t r26;
+ integer_t r27;
+ integer_t r28;
+ integer_t r29;
+ integer_t r30;
+/* integer_t r31; wired zero */
+ integer_t csr; /* status register */
+};
+
+#define ALPHA_FLOAT_STATE_COUNT (sizeof(struct alpha_float_state)/sizeof(natural_t))
+
+
+struct alpha_exc_state {
+ vm_offset_t address; /* last invalid virtual address */
+ unsigned int cause; /* machine-level trap code */
+# define ALPHA_EXC_SET_SSTEP 1
+ boolean_t used_fpa; /* did it ever use floats */
+};
+
+#define ALPHA_EXC_STATE_COUNT (sizeof(struct alpha_exc_state)/sizeof(natural_t))
+
+#endif /* _MACH_ALPHA_THREAD_STATE_ */
diff --git a/alpha/include/mach/alpha/vm_param.h b/alpha/include/mach/alpha/vm_param.h
new file mode 100644
index 00000000..5d3b6a0e
--- /dev/null
+++ b/alpha/include/mach/alpha/vm_param.h
@@ -0,0 +1,103 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: vm_param.h,v $
+ * Revision 2.2 93/01/14 17:41:15 danner
+ * Created, partly empty.
+ * [91/12/29 af]
+ *
+ */
+
+/*
+ * File: alpha/vm_param.h
+ * Author: Alessandro Forin
+ * Date: 12/91
+ *
+ * ALPHA machine dependent virtual memory parameters.
+ * Most declarations are preceded by ALPHA_ (or alpha_)
+ * because only Alpha specific code should be using
+ * them.
+ *
+ */
+
+#ifndef _MACH_ALPHA_VM_PARAM_H_
+#define _MACH_ALPHA_VM_PARAM_H_
+
+#define BYTE_SIZE 8 /* byte size in bits */
+
+#define ALPHA_PGBYTES 8192 /* bytes per alpha (min) phys page */
+#define ALPHA_PGSHIFT 13 /* number of bits to shift for pages */
+
+/*
+ * Convert bytes to pages and convert pages to bytes.
+ * No rounding is used.
+ */
+
+#define alpha_btop(x) (((vm_offset_t)(x)) >> ALPHA_PGSHIFT)
+#define alpha_ptob(x) (((vm_offset_t)(x)) << ALPHA_PGSHIFT)
+
+/*
+ * Round off or truncate to the nearest page. These will work
+ * for either addresses or counts. (i.e. 1 byte rounds to 1 page
+ * bytes.
+ */
+
+#define alpha_round_page(x) ((((vm_offset_t)(x)) + ALPHA_PGBYTES - 1) & \
+ ~(ALPHA_PGBYTES-1))
+#define alpha_trunc_page(x) (((vm_offset_t)(x)) & ~(ALPHA_PGBYTES-1))
+
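+/*
+ * Illustrative arithmetic, not part of the MK83a source: with the
+ * 8k pages above,
+ *
+ *	alpha_round_page(1)	== 8192
+ *	alpha_trunc_page(8193)	== 8192
+ *	alpha_btop(16384)	== 2
+ */
+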
+/*
+ * User level addressability
+ *
+ * The kernel must be mapped in the user's virtual
+ * space; just where is completely arbitrary. Since
+ * the virtual address range is subject to change
+ * across implementations, we cannot specify once and
+ * for all where we place it.
+ * [See alpha/alpha_cpu.h for details]
+ */
+#define VM_MIN_ADDRESS ((vm_offset_t) 0x0)
+#define VM_MAX_ADDRESS ((vm_offset_t) 0x000003fe00000000)
+
+/*
+ * The kernel's virtual address range is a bit arbitrary
+ */
+#define VM_MIN_KERNEL_ADDRESS VM_MAX_ADDRESS
+#define VM_MAX_KERNEL_ADDRESS ((vm_offset_t) 0x0000040000000000)
+
+#define KERNEL_STACK_SIZE ALPHA_PGBYTES
+
+/*
+ * Conversion between ALPHA pages and VM pages
+ */
+
+#define trunc_alpha_to_vm(p) (atop(trunc_page(alpha_ptob(p))))
+#define round_alpha_to_vm(p) (atop(round_page(alpha_ptob(p))))
+#define vm_to_alpha(p) (alpha_btop(ptoa(p)))
+
+
+#endif /* _MACH_ALPHA_VM_PARAM_H_ */
diff --git a/alpha/include/mach/alpha/vm_types.h b/alpha/include/mach/alpha/vm_types.h
new file mode 100644
index 00000000..17343132
--- /dev/null
+++ b/alpha/include/mach/alpha/vm_types.h
@@ -0,0 +1,98 @@
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990,1989 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * HISTORY
+ * $Log: vm_types.h,v $
+ * Revision 2.2 93/01/14 17:41:19 danner
+ * Created.
+ * [91/12/29 af]
+ *
+ */
+/*
+ * File: alpha/vm_types.h
+ * Author: Alessandro Forin
+ * Date: 12/91
+ *
+ * Header file for VM data types. Alpha version.
+ *
+ */
+
+#ifndef _MACHINE_VM_TYPES_H_
+#define _MACHINE_VM_TYPES_H_ 1
+
+#ifdef ASSEMBLER
+#else /* ASSEMBLER */
+
+/*
+ * A natural_t is the type for the native
+ * integer type, e.g. 32 or 64 or.. whatever
+ * register size the machine has. Unsigned, it is
+ * used for entities that might be either
+ * unsigned integers or pointers, and for
+ * type-casting between the two.
+ * For instance, the IPC system represents
+ * a port in user space as an integer and
+ * in kernel space as a pointer.
+ */
+typedef unsigned long natural_t;
+
+/*
+ * An integer_t is the signed counterpart
+ * of the natural_t type. Both types are
+ * only supposed to be used to define
+ * other types in a machine-independent
+ * way.
+ */
+typedef long integer_t;
+
+/*
+ * An int32 is an integer that is at least 32 bits wide
+ */
+typedef int int32;
+typedef unsigned int uint32;
+
+/*
+ * A vm_offset_t is a type-neutral pointer,
+ * e.g. an offset into a virtual memory space.
+ */
+typedef natural_t vm_offset_t;
+
+/*
+ * A vm_size_t is the proper type for e.g.
+ * expressing the difference between two
+ * vm_offset_t entities.
+ */
+typedef natural_t vm_size_t;
+
+#endif /* ASSEMBLER */
+
+/*
+ * If composing messages by hand (please don't)
+ */
+
+#define MACH_MSG_TYPE_INTEGER_T MACH_MSG_TYPE_INTEGER_64
+
+#endif /* _MACHINE_VM_TYPES_H_ */