summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.gitignore13
-rw-r--r--Makefile.am17
-rw-r--r--Makefrag.am62
-rw-r--r--Makerules.am2
-rw-r--r--Makerules.mig.am15
-rw-r--r--NEWS50
-rw-r--r--chips/busses.c26
-rw-r--r--chips/busses.h8
-rw-r--r--configfrag.ac6
-rw-r--r--ddb/db_access.c42
-rw-r--r--ddb/db_access.h6
-rw-r--r--ddb/db_aout.c118
-rw-r--r--ddb/db_aout.h52
-rw-r--r--ddb/db_break.c125
-rw-r--r--ddb/db_break.h18
-rw-r--r--ddb/db_command.c80
-rw-r--r--ddb/db_command.h15
-rw-r--r--ddb/db_cond.c22
-rw-r--r--ddb/db_cond.h2
-rw-r--r--ddb/db_elf.c232
-rw-r--r--ddb/db_elf.h52
-rw-r--r--ddb/db_examine.c71
-rw-r--r--ddb/db_examine.h28
-rw-r--r--ddb/db_expr.c36
-rw-r--r--ddb/db_expr.h2
-rw-r--r--ddb/db_ext_symtab.c12
-rw-r--r--ddb/db_input.c43
-rw-r--r--ddb/db_input.h2
-rw-r--r--ddb/db_lex.c31
-rw-r--r--ddb/db_lex.h9
-rw-r--r--ddb/db_macro.c40
-rw-r--r--ddb/db_macro.h4
-rw-r--r--ddb/db_mp.c37
-rw-r--r--ddb/db_mp.h30
-rw-r--r--ddb/db_output.c31
-rw-r--r--ddb/db_output.h10
-rw-r--r--ddb/db_print.c119
-rw-r--r--ddb/db_print.h14
-rw-r--r--ddb/db_run.c77
-rw-r--r--ddb/db_run.h36
-rw-r--r--ddb/db_sym.c137
-rw-r--r--ddb/db_sym.h42
-rw-r--r--ddb/db_task_thread.c88
-rw-r--r--ddb/db_task_thread.h24
-rw-r--r--ddb/db_trap.c26
-rw-r--r--ddb/db_variables.c46
-rw-r--r--ddb/db_variables.h9
-rw-r--r--ddb/db_watch.c45
-rw-r--r--ddb/db_watch.h12
-rw-r--r--ddb/db_write_cmd.c8
-rw-r--r--ddb/db_write_cmd.h20
-rw-r--r--ddb/stab.h4
-rw-r--r--device/blkio.c15
-rw-r--r--device/blkio.h24
-rw-r--r--device/buf.h10
-rw-r--r--device/chario.c36
-rw-r--r--device/chario.h37
-rw-r--r--device/cirbuf.c8
-rw-r--r--device/cirbuf.h3
-rw-r--r--device/conf.h48
-rw-r--r--device/cons.c18
-rw-r--r--device/cons.h11
-rw-r--r--device/dev_hdr.h10
-rw-r--r--device/dev_lookup.c41
-rw-r--r--device/dev_master.h7
-rw-r--r--device/dev_name.c68
-rw-r--r--device/dev_pager.c101
-rw-r--r--device/dev_pager.h28
-rw-r--r--device/device_init.c13
-rw-r--r--device/device_init.h24
-rw-r--r--device/device_types_kernel.h1
-rw-r--r--device/ds_routines.c206
-rw-r--r--device/ds_routines.h30
-rw-r--r--device/if_ether.h7
-rw-r--r--device/io_req.h2
-rw-r--r--device/kmsg.c17
-rw-r--r--device/kmsg.h2
-rw-r--r--device/net_io.c267
-rw-r--r--device/net_io.h65
-rw-r--r--device/subrs.c36
-rw-r--r--device/tty.h8
-rw-r--r--doc/mach.texi238
-rw-r--r--i386/Makefrag.am31
-rw-r--r--i386/configfrag.ac4
-rw-r--r--i386/grub/acpi.h220
-rw-r--r--i386/grub/compiler.h51
-rw-r--r--i386/grub/cpu/io.h72
-rw-r--r--i386/grub/cpu/time.h29
-rw-r--r--i386/grub/cpu/types.h33
-rw-r--r--i386/grub/err.h96
-rw-r--r--i386/grub/glue.h33
-rw-r--r--i386/grub/i18n.h25
-rw-r--r--i386/grub/misc.h517
-rw-r--r--i386/grub/mm.h77
-rw-r--r--i386/grub/symbol.h72
-rw-r--r--i386/grub/time.h46
-rw-r--r--i386/grub/types.h325
-rw-r--r--i386/i386/ast_check.c14
-rw-r--r--i386/i386/cpu.h110
-rw-r--r--i386/i386/db_disasm.c10
-rw-r--r--i386/i386/db_interface.c125
-rw-r--r--i386/i386/db_interface.h32
-rw-r--r--i386/i386/db_machdep.h1
-rw-r--r--i386/i386/db_trace.c87
-rw-r--r--i386/i386/db_trace.h32
-rw-r--r--i386/i386/debug.h4
-rw-r--r--i386/i386/debug_i386.c35
-rw-r--r--i386/i386/fpu.c130
-rw-r--r--i386/i386/fpu.h3
-rw-r--r--i386/i386/gdt.c2
-rw-r--r--i386/i386/hardclock.c10
-rw-r--r--i386/i386/hardclock.h29
-rw-r--r--i386/i386/idt.c2
-rw-r--r--i386/i386/io_map.c38
-rw-r--r--i386/i386/io_perm.c10
-rw-r--r--i386/i386/io_perm.h2
-rw-r--r--i386/i386/ipl.h4
-rw-r--r--i386/i386/ktss.c2
-rw-r--r--i386/i386/kttd_interface.c2
-rw-r--r--i386/i386/ldt.c5
-rw-r--r--i386/i386/lock.h2
-rw-r--r--i386/i386/locore.S111
-rw-r--r--i386/i386/locore.h2
-rw-r--r--i386/i386/loose_ends.c10
-rw-r--r--i386/i386/machine_routines.h3
-rw-r--r--i386/i386/machine_task.c4
-rw-r--r--i386/i386/model_dep.h8
-rw-r--r--i386/i386/mp_desc.c17
-rw-r--r--i386/i386/mp_desc.h2
-rw-r--r--i386/i386/pcb.c136
-rw-r--r--i386/i386/pcb.h16
-rw-r--r--i386/i386/phys.c110
-rw-r--r--i386/i386/pic.c11
-rw-r--r--i386/i386/pic.h2
-rw-r--r--i386/i386/pit.c4
-rw-r--r--i386/i386/setjmp.h4
-rw-r--r--i386/i386/spl.S36
-rw-r--r--i386/i386/thread.h3
-rw-r--r--i386/i386/trap.c91
-rw-r--r--i386/i386/trap.h9
-rw-r--r--i386/i386/user_ldt.c72
-rw-r--r--i386/i386/user_ldt.h12
-rw-r--r--i386/i386/vm_param.h53
-rw-r--r--i386/i386/vm_tuning.h35
-rw-r--r--i386/i386/xen.h4
-rw-r--r--i386/i386at/acpi.c82
-rw-r--r--i386/i386at/acpihalt.c409
-rw-r--r--i386/i386at/acpihalt.h23
-rw-r--r--i386/i386at/autoconf.c14
-rw-r--r--i386/i386at/biosmem.c910
-rw-r--r--i386/i386at/biosmem.h88
-rw-r--r--i386/i386at/com.c107
-rw-r--r--i386/i386at/com.h33
-rw-r--r--i386/i386at/conf.c62
-rw-r--r--i386/i386at/cons_conf.c7
-rw-r--r--i386/i386at/cram.h4
-rw-r--r--i386/i386at/disk.h103
-rw-r--r--i386/i386at/elf.h61
-rw-r--r--i386/i386at/grub_glue.c67
-rw-r--r--i386/i386at/i8250.h5
-rw-r--r--i386/i386at/idt.h2
-rw-r--r--i386/i386at/immc.c78
-rw-r--r--i386/i386at/immc.h31
-rw-r--r--i386/i386at/int_init.c2
-rw-r--r--i386/i386at/int_init.h2
-rw-r--r--i386/i386at/kd.c533
-rw-r--r--i386/i386at/kd.h66
-rw-r--r--i386/i386at/kd_event.c98
-rw-r--r--i386/i386at/kd_event.h29
-rw-r--r--i386/i386at/kd_mouse.c109
-rw-r--r--i386/i386at/kd_mouse.h13
-rw-r--r--i386/i386at/kd_queue.c10
-rw-r--r--i386/i386at/kd_queue.h9
-rw-r--r--i386/i386at/kdsoft.h5
-rw-r--r--i386/i386at/lpr.c75
-rw-r--r--i386/i386at/lpr.h (renamed from i386/i386at/lprreg.h)33
-rw-r--r--i386/i386at/mem.c2
-rw-r--r--i386/i386at/mem.h24
-rw-r--r--i386/i386at/model_dep.c396
-rw-r--r--i386/i386at/model_dep.h39
-rw-r--r--i386/i386at/pic_isa.c2
-rw-r--r--i386/i386at/rtc.c33
-rw-r--r--i386/i386at/rtc.h9
-rw-r--r--i386/include/mach/i386/asm.h4
-rw-r--r--i386/include/mach/i386/cthreads.h2
-rw-r--r--i386/include/mach/i386/disk.h120
-rw-r--r--i386/include/mach/i386/mach_i386_types.h4
-rw-r--r--i386/include/mach/i386/multiboot.h105
-rw-r--r--i386/include/mach/i386/rpc.h9
-rw-r--r--i386/include/mach/i386/vm_param.h2
-rw-r--r--i386/include/mach/i386/vm_types.h9
-rw-r--r--i386/intel/pmap.c378
-rw-r--r--i386/intel/pmap.h32
-rw-r--r--i386/intel/read_fault.c10
-rw-r--r--include/cache.h25
-rw-r--r--include/device/device.defs19
-rw-r--r--include/device/device_reply.defs8
-rw-r--r--include/device/device_request.defs8
-rw-r--r--include/device/device_types.defs17
-rw-r--r--include/device/device_types.h2
-rw-r--r--include/device/tape_status.h2
-rw-r--r--include/device/tty_status.h5
-rw-r--r--include/mach/alert.h2
-rw-r--r--include/mach/boot.h4
-rw-r--r--include/mach/default_pager_types.defs2
-rw-r--r--include/mach/default_pager_types.h2
-rw-r--r--include/mach/gnumach.defs47
-rw-r--r--include/mach/mach.defs45
-rw-r--r--include/mach/mach_host.defs6
-rw-r--r--include/mach/mach_port.defs32
-rw-r--r--include/mach/mach_types.defs15
-rw-r--r--include/mach/memory_object.defs53
-rw-r--r--include/mach/memory_object_default.defs4
-rw-r--r--include/mach/message.h9
-rw-r--r--include/mach/multiboot.h2
-rw-r--r--include/mach/notify.defs35
-rw-r--r--include/mach/port.h2
-rw-r--r--include/mach/profil.h2
-rw-r--r--include/mach/rpc.h1
-rw-r--r--include/mach/std_types.defs8
-rw-r--r--include/mach/task_notify.defs36
-rw-r--r--include/mach/time_value.h45
-rw-r--r--include/mach/version.h5
-rw-r--r--include/mach/vm_param.h2
-rw-r--r--include/mach_debug/ipc_info.h23
-rw-r--r--include/mach_debug/mach_debug.defs33
-rw-r--r--include/mach_debug/mach_debug_types.defs9
-rw-r--r--include/mach_debug/mach_debug_types.h11
-rw-r--r--include/mach_debug/pc_info.h2
-rw-r--r--include/mach_debug/slab_info.h6
-rw-r--r--include/string.h8
-rw-r--r--ipc/ipc_entry.c795
-rw-r--r--ipc/ipc_entry.h72
-rw-r--r--ipc/ipc_hash.c620
-rw-r--r--ipc/ipc_hash.h96
-rw-r--r--ipc/ipc_init.c16
-rw-r--r--ipc/ipc_kmsg.c344
-rw-r--r--ipc/ipc_kmsg.h29
-rw-r--r--ipc/ipc_kmsg_queue.h2
-rwxr-xr-xipc/ipc_machdep.h4
-rw-r--r--ipc/ipc_marequest.c36
-rw-r--r--ipc/ipc_mqueue.c14
-rw-r--r--ipc/ipc_mqueue.h3
-rw-r--r--ipc/ipc_notify.c42
-rw-r--r--ipc/ipc_object.c61
-rw-r--r--ipc/ipc_object.h6
-rw-r--r--ipc/ipc_port.c87
-rw-r--r--ipc/ipc_port.h29
-rw-r--r--ipc/ipc_print.h25
-rw-r--r--ipc/ipc_pset.c4
-rw-r--r--ipc/ipc_right.c91
-rw-r--r--ipc/ipc_space.c104
-rw-r--r--ipc/ipc_space.h232
-rw-r--r--ipc/ipc_splay.c920
-rw-r--r--ipc/ipc_splay.h114
-rw-r--r--ipc/ipc_table.c70
-rw-r--r--ipc/ipc_table.h59
-rw-r--r--ipc/ipc_thread.h8
-rw-r--r--ipc/mach_debug.c339
-rw-r--r--ipc/mach_msg.c299
-rw-r--r--ipc/mach_port.c262
-rw-r--r--ipc/mach_port.h11
-rw-r--r--ipc/mach_rpc.c9
-rw-r--r--ipc/notify.defs22
-rw-r--r--ipc/port.h7
-rw-r--r--kern/act.c35
-rw-r--r--kern/act.h5
-rw-r--r--kern/assert.h17
-rw-r--r--kern/ast.c24
-rw-r--r--kern/ast.h2
-rw-r--r--kern/boot_script.c6
-rw-r--r--kern/boot_script.h4
-rw-r--r--kern/bootstrap.c82
-rw-r--r--kern/bootstrap.h24
-rw-r--r--kern/counters.c6
-rw-r--r--kern/counters.h6
-rw-r--r--kern/cpu_number.h2
-rw-r--r--kern/debug.c19
-rw-r--r--kern/debug.h4
-rw-r--r--kern/elf-load.c2
-rw-r--r--kern/eventcount.c21
-rw-r--r--kern/eventcount.h7
-rw-r--r--kern/exc.defs22
-rw-r--r--kern/exception.c146
-rw-r--r--kern/exception.h66
-rw-r--r--kern/host.c26
-rw-r--r--kern/ipc_host.c2
-rw-r--r--kern/ipc_kobject.c30
-rw-r--r--kern/ipc_kobject.h5
-rw-r--r--kern/ipc_mig.c229
-rw-r--r--kern/ipc_mig.h79
-rw-r--r--kern/ipc_sched.c12
-rw-r--r--kern/ipc_tt.c68
-rw-r--r--kern/list.h4
-rw-r--r--kern/lock.c100
-rw-r--r--kern/lock.h70
-rw-r--r--kern/lock_mon.c51
-rw-r--r--kern/log2.h50
-rw-r--r--kern/mach_clock.c162
-rw-r--r--kern/mach_clock.h20
-rw-r--r--kern/mach_factor.c16
-rw-r--r--kern/machine.c110
-rw-r--r--kern/machine.h2
-rw-r--r--kern/macro_help.h55
-rw-r--r--kern/macros.h82
-rw-r--r--kern/pc_sample.c42
-rw-r--r--kern/pc_sample.h2
-rw-r--r--kern/printf.c112
-rw-r--r--kern/printf.h16
-rw-r--r--kern/priority.c18
-rw-r--r--kern/priority.h28
-rw-r--r--kern/processor.c51
-rw-r--r--kern/processor.h6
-rw-r--r--kern/profile.c6
-rw-r--r--kern/queue.c24
-rw-r--r--kern/queue.h41
-rw-r--r--kern/rbtree.h10
-rw-r--r--kern/rdxtree.c830
-rw-r--r--kern/rdxtree.h209
-rw-r--r--kern/rdxtree_i.h66
-rw-r--r--kern/refcount.h8
-rw-r--r--kern/sched.h4
-rw-r--r--kern/sched_prim.c237
-rw-r--r--kern/sched_prim.h33
-rw-r--r--kern/server_loop.ch104
-rw-r--r--kern/shuttle.h2
-rw-r--r--kern/slab.c406
-rw-r--r--kern/slab.h64
-rw-r--r--kern/startup.c55
-rw-r--r--kern/startup.h28
-rw-r--r--kern/strings.c40
-rw-r--r--kern/syscall_emulation.c54
-rw-r--r--kern/syscall_emulation.h6
-rw-r--r--kern/syscall_subr.c58
-rw-r--r--kern/syscall_subr.h1
-rw-r--r--kern/syscall_sw.c29
-rw-r--r--kern/syscall_sw.h6
-rw-r--r--kern/task.c190
-rw-r--r--kern/task.h22
-rw-r--r--kern/task_notify.cli7
-rw-r--r--kern/thread.c206
-rw-r--r--kern/thread.h47
-rw-r--r--kern/thread_swap.c27
-rw-r--r--kern/thread_swap.h5
-rw-r--r--kern/time_stamp.c24
-rw-r--r--kern/timer.c89
-rw-r--r--kern/timer.h6
-rw-r--r--kern/xpr.c29
-rw-r--r--kern/xpr.h2
-rw-r--r--linux/Makefrag.am5
-rw-r--r--linux/dev/arch/i386/kernel/irq.c2
-rw-r--r--linux/dev/drivers/block/ahci.c348
-rw-r--r--linux/dev/drivers/block/floppy.c2
-rw-r--r--linux/dev/drivers/block/genhd.c26
-rw-r--r--linux/dev/glue/block.c51
-rw-r--r--linux/dev/glue/glue.h4
-rw-r--r--linux/dev/glue/kmem.c6
-rw-r--r--linux/dev/include/asm-i386/string.h36
-rw-r--r--linux/dev/include/linux/blk.h6
-rw-r--r--linux/dev/include/linux/blkdev.h1
-rw-r--r--linux/dev/include/linux/fs.h8
-rw-r--r--linux/dev/include/linux/locks.h10
-rw-r--r--linux/dev/include/linux/mm.h2
-rw-r--r--linux/dev/include/linux/types.h9
-rw-r--r--linux/dev/init/main.c143
-rw-r--r--linux/pcmcia-cs/clients/axnet_cs.c2
-rw-r--r--linux/pcmcia-cs/glue/ds.c6
-rw-r--r--linux/src/arch/i386/kernel/bios32.c2
-rw-r--r--linux/src/drivers/block/ide-cd.c51
-rw-r--r--linux/src/drivers/block/ide.c34
-rw-r--r--linux/src/drivers/block/ide.h1
-rw-r--r--linux/src/drivers/net/3c507.c4
-rw-r--r--linux/src/drivers/net/3c509.c2
-rw-r--r--linux/src/drivers/net/3c515.c4
-rw-r--r--linux/src/drivers/net/ac3200.c2
-rw-r--r--linux/src/drivers/net/apricot.c2
-rw-r--r--linux/src/drivers/net/at1700.c2
-rw-r--r--linux/src/drivers/net/de4x5.c2
-rw-r--r--linux/src/drivers/net/de600.c2
-rw-r--r--linux/src/drivers/net/de620.c2
-rw-r--r--linux/src/drivers/net/depca.c2
-rw-r--r--linux/src/drivers/net/e2100.c6
-rw-r--r--linux/src/drivers/net/eepro.c2
-rw-r--r--linux/src/drivers/net/eepro100.c2
-rw-r--r--linux/src/drivers/net/eexpress.c2
-rw-r--r--linux/src/drivers/net/ewrk3.c2
-rw-r--r--linux/src/drivers/net/fmv18x.c2
-rw-r--r--linux/src/drivers/net/hp-plus.c2
-rw-r--r--linux/src/drivers/net/hp.c2
-rw-r--r--linux/src/drivers/net/lance.c2
-rw-r--r--linux/src/drivers/net/ne.c2
-rw-r--r--linux/src/drivers/net/pci-scan.c2
-rw-r--r--linux/src/drivers/net/pcnet32.c2
-rw-r--r--linux/src/drivers/net/seeq8005.c2
-rw-r--r--linux/src/drivers/net/smc-ultra.c2
-rw-r--r--linux/src/drivers/net/smc-ultra32.c2
-rw-r--r--linux/src/drivers/net/sundance.c2
-rw-r--r--linux/src/drivers/net/tlan.c4
-rw-r--r--linux/src/drivers/net/wd.c2
-rw-r--r--linux/src/drivers/scsi/AM53C974.c2
-rw-r--r--linux/src/drivers/scsi/FlashPoint.c14
-rw-r--r--linux/src/drivers/scsi/NCR5380.c4
-rw-r--r--linux/src/drivers/scsi/advansys.c1
-rw-r--r--linux/src/drivers/scsi/t128.c4
-rw-r--r--linux/src/include/asm-i386/bitops.h28
-rw-r--r--linux/src/include/asm-i386/io.h12
-rw-r--r--linux/src/include/asm-i386/segment.h8
-rw-r--r--linux/src/include/asm-i386/semaphore.h30
-rw-r--r--linux/src/include/asm-i386/termios.h4
-rw-r--r--linux/src/include/linux/compiler-gcc.h2
-rw-r--r--linux/src/include/linux/compiler-gcc5.h67
-rw-r--r--linux/src/include/linux/compiler-gcc6.h67
-rw-r--r--linux/src/include/linux/compiler.h8
-rw-r--r--linux/src/include/linux/interrupt.h12
-rw-r--r--linux/src/include/linux/string.h8
-rw-r--r--linux/src/include/net/route.h16
-rw-r--r--linux/src/include/net/sock.h6
-rw-r--r--util/atoi.c8
-rw-r--r--util/atoi.h2
-rw-r--r--version.m42
-rw-r--r--vm/memory_object.c161
-rw-r--r--vm/memory_object_proxy.c10
-rw-r--r--vm/memory_object_proxy.h15
-rw-r--r--vm/pmap.h39
-rw-r--r--vm/vm_debug.c35
-rw-r--r--vm/vm_external.c28
-rw-r--r--vm/vm_external.h5
-rw-r--r--vm/vm_fault.c160
-rw-r--r--vm/vm_fault.h6
-rw-r--r--vm/vm_init.c5
-rw-r--r--vm/vm_init.h25
-rw-r--r--vm/vm_kern.c319
-rw-r--r--vm/vm_kern.h16
-rw-r--r--vm/vm_map.c586
-rw-r--r--vm/vm_map.h37
-rw-r--r--vm/vm_object.c181
-rw-r--r--vm/vm_object.h14
-rw-r--r--vm/vm_page.c782
-rw-r--r--vm/vm_page.h275
-rw-r--r--vm/vm_pageout.c85
-rw-r--r--vm/vm_pageout.h6
-rw-r--r--vm/vm_print.h25
-rw-r--r--vm/vm_resident.c663
-rw-r--r--vm/vm_resident.h6
-rw-r--r--vm/vm_user.c149
-rw-r--r--xen/block.c7
-rw-r--r--xen/console.c2
-rw-r--r--xen/console.h10
-rw-r--r--xen/grant.c2
-rw-r--r--xen/net.c5
-rw-r--r--xen/time.c7
451 files changed, 15164 insertions, 10684 deletions
diff --git a/.gitignore b/.gitignore
index 4488bc2d..92fc6e7f 100644
--- a/.gitignore
+++ b/.gitignore
@@ -37,3 +37,16 @@ build-aux/
/mach/machine
/linux/src/include/asm
/linux/dev/include/asm
+
+/doc/mach.aux
+/doc/mach.cp
+/doc/mach.cps
+/doc/mach.fn
+/doc/mach.fns
+/doc/mach.ky
+/doc/mach.log
+/doc/mach.pdf
+/doc/mach.pg
+/doc/mach.toc
+/doc/mach.tp
+/doc/mach.vr
diff --git a/Makefile.am b/Makefile.am
index 918cdc39..1c1bffff 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -59,7 +59,12 @@ AM_CFLAGS += \
AM_CFLAGS += \
-Wall
-# See <http://lists.gnu.org/archive/html/bug-hurd/2006-01/msg00148.html>.
+# We need the GNU-style inline
+AM_CFLAGS += \
+ -fgnu89-inline
+
+# Much of the Mach code predates C99 and makes invalid assumptions about
+# type punning.
AM_CFLAGS += \
-fno-strict-aliasing
@@ -104,8 +109,8 @@ CPP = @CPP@ -x c
# Other Tools' Configuration.
#
-# Don't needlessly overwrite files that whose contents haven't changed. This
-# helps for avoinding unneccessary recompilation cycles when keeping
+# Don't needlessly overwrite files whose contents haven't changed.
+# This helps avoiding unnecessary recompilation cycles when keeping
# cross-compilation toolchains up-to-date. Thus, unconditionally use the
# `install-sh' that is supplied by GNU Automake 1.10.1, as the GNU Coreutils
# one doesn't provide this functionality yet (TODO: change that). TODO:
@@ -159,8 +164,9 @@ noinst_PROGRAMS += \
clib_routines := memcmp memcpy memmove \
strchr strstr strsep strtok \
htonl htons ntohl ntohs \
- udivdi3 __udivdi3 \
+ udivdi3 __udivdi3 __umoddi3 \
__rel_iplt_start __rel_iplt_end \
+ __ffsdi2 \
_START _start etext _edata end _end # actually ld magic, not libc.
gnumach-undef: gnumach.$(OBJEXT)
$(NM_V) $(NM) -u $< | sed 's/ *U *//' | sort -u > $@
@@ -173,6 +179,9 @@ clib-routines.o: gnumach-undef gnumach-undef-bad
then cat gnumach-undef-bad; exit 2; else true; fi
$(AM_V_CCLD) $(CCLD) -nostdlib -nostartfiles -r -static \
-o $@ `sed 's/^/-Wl,-u,/' < $<` -x c /dev/null -lc -lgcc
+ @if nm $@ | grep __init_cpu_features; \
+ then echo "Please install a 32bit libc without multiarch support (on Debian systems, the libc6-dev:i386 package containing /usr/lib/i386-linux-gnu/libc.a)". ; \
+ false ; fi
gnumach_LINK = $(LD) $(LINKFLAGS) $(gnumach_LINKFLAGS) -o $@
gnumach_LDADD = gnumach.o clib-routines.o
diff --git a/Makefrag.am b/Makefrag.am
index cce42cb4..9a68af81 100644
--- a/Makefrag.am
+++ b/Makefrag.am
@@ -24,6 +24,9 @@ libkernel_a_SOURCES += \
ddb/db_access.c \
ddb/db_access.h \
ddb/db_aout.c \
+ ddb/db_aout.h \
+ ddb/db_elf.c \
+ ddb/db_elf.h \
ddb/db_break.c \
ddb/db_break.h \
ddb/db_command.c \
@@ -42,6 +45,7 @@ libkernel_a_SOURCES += \
ddb/db_macro.c \
ddb/db_macro.h \
ddb/db_mp.c \
+ ddb/db_mp.h \
ddb/db_output.c \
ddb/db_output.h \
ddb/db_print.c \
@@ -76,8 +80,6 @@ endif
libkernel_a_SOURCES += \
ipc/ipc_entry.c \
ipc/ipc_entry.h \
- ipc/ipc_hash.c \
- ipc/ipc_hash.h \
ipc/ipc_init.c \
ipc/ipc_init.h \
ipc/ipc_kmsg.c \
@@ -101,8 +103,6 @@ libkernel_a_SOURCES += \
ipc/ipc_right.h \
ipc/ipc_space.c \
ipc/ipc_space.h \
- ipc/ipc_splay.c \
- ipc/ipc_splay.h \
ipc/ipc_table.c \
ipc/ipc_table.h \
ipc/ipc_target.c \
@@ -118,7 +118,8 @@ libkernel_a_SOURCES += \
ipc/mach_debug.c \
ipc/port.h
EXTRA_DIST += \
- ipc/mach_port.srv
+ ipc/mach_port.srv \
+ ipc/notify.defs
#
@@ -133,6 +134,7 @@ libkernel_a_SOURCES += \
kern/ast.h \
kern/boot_script.h \
kern/bootstrap.c \
+ kern/bootstrap.h \
kern/counters.c \
kern/counters.h \
kern/cpu_number.h \
@@ -141,6 +143,7 @@ libkernel_a_SOURCES += \
kern/eventcount.c \
kern/eventcount.h \
kern/exception.c \
+ kern/exception.h \
kern/host.c \
kern/host.h \
kern/ipc_host.c \
@@ -165,12 +168,13 @@ libkernel_a_SOURCES += \
kern/mach_factor.h \
kern/machine.c \
kern/machine.h \
- kern/macro_help.h \
+ kern/macros.h \
kern/pc_sample.c \
kern/pc_sample.h \
kern/printf.c \
kern/printf.h \
kern/priority.c \
+ kern/priority.h \
kern/processor.c \
kern/processor.h \
kern/profile.c \
@@ -179,6 +183,9 @@ libkernel_a_SOURCES += \
kern/rbtree.c \
kern/rbtree.h \
kern/rbtree_i.h \
+ kern/rdxtree.c \
+ kern/rdxtree.h \
+ kern/rdxtree_i.h \
kern/refcount.h \
kern/slab.c \
kern/slab.h \
@@ -187,6 +194,7 @@ libkernel_a_SOURCES += \
kern/sched_prim.h \
kern/shuttle.h \
kern/startup.c \
+ kern/startup.h \
kern/strings.c \
kern/syscall_emulation.c \
kern/syscall_emulation.h \
@@ -213,7 +221,8 @@ EXTRA_DIST += \
kern/mach4.srv \
kern/gnumach.srv \
kern/mach_debug.srv \
- kern/mach_host.srv
+ kern/mach_host.srv \
+ kern/task_notify.cli
#
@@ -243,12 +252,14 @@ libkernel_a_SOURCES += \
vm/vm_fault.c \
vm/vm_fault.h \
vm/vm_init.c \
+ vm/vm_init.h \
vm/vm_kern.c \
vm/vm_kern.h \
vm/vm_map.c \
vm/vm_map.h \
vm/vm_object.c \
vm/vm_object.h \
+ vm/vm_page.c \
vm/vm_page.h \
vm/vm_pageout.c \
vm/vm_pageout.h \
@@ -272,8 +283,10 @@ EXTRA_DIST += \
# TODO. Functions in device/subrs.c should each be moved elsewhere.
libkernel_a_SOURCES += \
device/blkio.c \
+ device/blkio.h \
device/buf.h \
device/chario.c \
+ device/chario.h \
device/cirbuf.h \
device/conf.h \
device/cons.c \
@@ -284,7 +297,9 @@ libkernel_a_SOURCES += \
device/dev_master.h \
device/dev_name.c \
device/dev_pager.c \
+ device/dev_pager.h \
device/device_init.c \
+ device/device_init.h \
device/device_port.h \
device/device_types_kernel.h \
device/ds_routines.c \
@@ -351,6 +366,7 @@ include_mach_HEADERS = \
include/mach/mach.defs \
include/mach/mach4.defs \
include/mach/gnumach.defs \
+ include/mach/task_notify.defs \
include/mach/mach_host.defs \
include/mach/mach_port.defs \
include/mach/mach_types.defs \
@@ -408,9 +424,18 @@ include_mach_eXec_HEADERS = \
include/mach/exec/elf.h \
include/mach/exec/exec.h
-# mach-debug-headers:= $(addprefix mach_debug/, hash_info.h ipc_info.h \
-# mach_debug.defs mach_debug_types.defs mach_debug_types.h \
-# pc_info.h vm_info.h slab_info.h)
+include_mach_debugdir = $(includedir)/mach_debug
+include_mach_debug_HEADERS = \
+ $(addprefix include/mach_debug/, \
+ hash_info.h \
+ ipc_info.h \
+ mach_debug.defs \
+ mach_debug_types.defs \
+ mach_debug_types.h \
+ pc_info.h \
+ vm_info.h \
+ slab_info.h \
+ )
# Other headers for the distribution. We don't install these, because the
# GNU C library has correct versions for users to use.
@@ -466,6 +491,13 @@ nodist_libkernel_a_SOURCES += \
# device/device_reply.user.defs
# device/memory_object_reply.user.defs
+nodist_lib_dep_tr_for_defs_a_SOURCES += \
+ kern/task_notify.user.defs.c
+nodist_libkernel_a_SOURCES += \
+ kern/task_notify.user.h \
+ kern/task_notify.user.c \
+ kern/task_notify.user.msgids
+
# Server stubs.
nodist_lib_dep_tr_for_defs_a_SOURCES += \
device/device.server.defs.c \
@@ -514,6 +546,16 @@ nodist_libkernel_a_SOURCES += \
# kern/mach_debug.server.defs
# kern/mach_host.server.defs
+# Stand-alone rule to generate the list of message ids when neither
+# the client nor the server stubs are required.
+nodist_lib_dep_tr_for_defs_a_SOURCES += \
+ ipc/notify.none.defs.c \
+ kern/exc.none.defs.c
+nodist_libkernel_a_SOURCES += \
+ ipc/notify.none.msgids \
+ kern/exc.none.msgids
+# ipc/notify.none.defs
+
# rpctrace can make use of that.
MOSTLYCLEANFILES += \
gnumach.msgids
diff --git a/Makerules.am b/Makerules.am
index b1f17d12..5106fef1 100644
--- a/Makerules.am
+++ b/Makerules.am
@@ -21,7 +21,7 @@ EXTRA_DIST += \
gensym.awk
%.symc: %.sym gensym.awk
$(AWK_V) $(AWK) -f $(word 2,$^) $< > $@
-%.symc.o: %.symc
+%.symc.o: %.symc config.h
$(AM_V_CC) $(COMPILE) -S -x c -o $@ $<
%.h: %.symc.o
$(AM_V_GEN) sed < $< > $@ \
diff --git a/Makerules.mig.am b/Makerules.mig.am
index 30609846..8ae65557 100644
--- a/Makerules.mig.am
+++ b/Makerules.mig.am
@@ -74,19 +74,28 @@ lib_dep_tr_for_defs_a_CPPFLAGS = $(AM_CPPFLAGS) \
%.server.defs.c: %.srv
$(AM_V_at) rm -f $@
$(AM_V_GEN) cp -p $< $@
-%.user.defs.c: %.cli
- $(AM_V_at) rm -f $@
- $(AM_V_GEN) cp -p $< $@
%.server.h %.server.c %.server.msgids: lib_dep_tr_for_defs_a-%.server.defs.$(OBJEXT)
$(MIGCOM_V) $(MIGCOM) $(MIGCOMFLAGS) $(MIGCOMSFLAGS) \
-sheader $*.server.h -server $*.server.c \
-list $*.server.msgids \
< $<
+%.user.defs.c: %.cli
+ $(AM_V_at) rm -f $@
+ $(AM_V_GEN) cp -p $< $@
%.user.h %.user.c %.user.msgids: lib_dep_tr_for_defs_a-%.user.defs.$(OBJEXT)
$(MIGCOM_V) $(MIGCOM) $(MIGCOMFLAGS) $(MIGCOMUFLAGS) \
-user $*.user.c -header $*.user.h \
-list $*.user.msgids \
< $<
+# Stand-alone rule to generate the list of message ids when neither
+# the client nor the server stubs are required.
+%.none.defs.c: %.defs
+ $(AM_V_at) rm -f $@
+ $(AM_V_GEN) cp -p $< $@
+%.none.msgids: lib_dep_tr_for_defs_a-%.none.defs.$(OBJEXT)
+ $(MIGCOM_V) $(MIGCOM) $(MIGCOMFLAGS) \
+ -list $*.none.msgids \
+ < $<
# This is how it should be done, but this is not integrated into GNU Automake
# and is missing automatic inter-file dependency management because of that.
diff --git a/NEWS b/NEWS
index 55d75ccf..e8bb33f2 100644
--- a/NEWS
+++ b/NEWS
@@ -1,5 +1,51 @@
-2013-09-27
-Version 1.4
+Version 1.6 (2015-10-31)
+
+The code has been updated to work with newer versions of the compiler,
+and numerous bugs have been fixed throughout the code.
+
+The lock debugging infrastructure has been revived and improved, and
+many locking issues have been fixed.
+
+The IPC tables and the hash table mapping objects to IPC entries have
+been replaced by radix trees. This addresses a scalability issue, as
+IPC tables required huge amounts of continuous virtual kernel memory.
+
+The kernel now allows non-privileged users to wire a small amount of
+memory.
+
+A bug hindering the eviction of inactive pages by the pageout daemon
+has been identified and fixed.
+
+The kernel now keeps timestamps relative to the system boot time.
+Among other things this fixes bogus uptime readings if the system time
+is altered.
+
+A reference leak in the exception handling mechanism has been
+identified and fixed.
+
+ANSI escape sequences are now handled when using `printf'. This fixes
+the formatting of messages printed by various Linux drivers.
+
+Version 1.5 (2015-04-10)
+
+Numerous cleanups and stylistic fixes of the code base. Several
+problems have been identified using static analysis tools and
+subsequently been fixed.
+
+A protected payload can now be associated with capabilities. This
+payload is attached by the kernel to delivered messages and can be
+used to speed up the object lookup in the receiving task.
+
+The kernel debugger can now parse ELF symbol tables, can be invoked
+over serial lines, gained two new commands and has received usability
+improvements.
+
+The vm pageout policy has been tuned to accommodate modern hardware.
+
+The kernel gained partial ACPI support on x86, enough to power down
+the system.
+
+Version 1.4 (2013-09-27)
Really too many to list them individually. Highlight include numerous bug and
stability fixes, a Xen port for 32-bit x86 including basic support for Physical
diff --git a/chips/busses.c b/chips/busses.c
index 89afa973..3811d0c6 100644
--- a/chips/busses.c
+++ b/chips/busses.c
@@ -59,17 +59,17 @@
*
*/
boolean_t configure_bus_master(
- char *name,
+ const char *name,
vm_offset_t virt,
vm_offset_t phys,
int adpt_no,
- char *bus_name)
+ const char *bus_name)
{
- register struct bus_device *device;
- register struct bus_ctlr *master;
- register struct bus_driver *driver;
+ struct bus_device *device;
+ struct bus_ctlr *master;
+ struct bus_driver *driver;
- int found = 0;
+ boolean_t found = FALSE;
/*
* Match the name in the table, then pick the entry that has the
@@ -81,7 +81,7 @@ boolean_t configure_bus_master(
continue;
if (((master->adaptor == adpt_no) || (master->adaptor == '?')) &&
(strcmp(master->name, name) == 0)) {
- found = 1;
+ found = TRUE;
break;
}
}
@@ -171,16 +171,16 @@ boolean_t configure_bus_master(
*
*/
boolean_t configure_bus_device(
- char *name,
+ const char *name,
vm_offset_t virt,
vm_offset_t phys,
int adpt_no,
- char *bus_name)
+ const char *bus_name)
{
- register struct bus_device *device;
- register struct bus_driver *driver;
+ struct bus_device *device;
+ struct bus_driver *driver;
- int found = 0;
+ boolean_t found = FALSE;
/*
* Walk all devices to find one with the right name
@@ -196,7 +196,7 @@ boolean_t configure_bus_device(
((!device->phys_address) ||
((device->phys_address == phys) && (device->address == virt))) &&
(strcmp(device->name, name) == 0)) {
- found = 1;
+ found = TRUE;
break;
}
}
diff --git a/chips/busses.h b/chips/busses.h
index 49c0e44f..f728add0 100644
--- a/chips/busses.h
+++ b/chips/busses.h
@@ -144,10 +144,10 @@ struct bus_driver {
extern struct bus_ctlr bus_master_init[];
extern struct bus_device bus_device_init[];
-extern boolean_t configure_bus_master(char *, vm_offset_t, vm_offset_t,
- int, char * );
-extern boolean_t configure_bus_device(char *, vm_offset_t, vm_offset_t,
- int, char * );
+extern boolean_t configure_bus_master(const char *, vm_offset_t, vm_offset_t,
+ int, const char * );
+extern boolean_t configure_bus_device(const char *, vm_offset_t, vm_offset_t,
+ int, const char * );
#endif /* KERNEL */
diff --git a/configfrag.ac b/configfrag.ac
index 5f13b63c..c0e04b32 100644
--- a/configfrag.ac
+++ b/configfrag.ac
@@ -63,7 +63,7 @@ AC_DEFINE([MACH_IPC_DEBUG], [1], [MACH_IPC_DEBUG])
# Testing code/printfs.
AC_DEFINE([MACH_IPC_TEST], [0], [MACH_IPC_TEST])
-# Sanity-check simple locking.
+# Sanity-check locking.
AC_DEFINE([MACH_LDEBUG], [0], [MACH_LDEBUG])
# MP lock monitoring. Registers use of locks, contention. Depending on
@@ -101,7 +101,7 @@ AC_DEFINE([SIMPLE_CLOCK], [0], [SIMPLE_CLOCK])
AC_DEFINE([STAT_TIME], [1], [STAT_TIME])
# Kernel tracing.
-AC_DEFINE([XPR_DEBUG], [1], [XPR_DEBUG])
+AC_DEFINE([XPR_DEBUG], [0], [XPR_DEBUG])
# Slab allocator debugging facilities.
AC_DEFINE([SLAB_VERIFY], [0], [SLAB_VERIFY])
@@ -142,7 +142,7 @@ AC_ARG_ENABLE([kmsg],
# `${file}' and `$file' have different meanings here with respect to having the
# files in the referenced directory considered for `make dist' or not. See
# <http://lists.gnu.org/archive/html/bug-automake/2006-11/msg00027.html>.
-AC_CONFIG_LINKS([machine:$systype/$systype
+AC_CONFIG_LINKS([machine:$srcdir/$systype/$systype
mach/machine:$systype/include/mach/$systype])
dnl Local Variables:
diff --git a/ddb/db_access.c b/ddb/db_access.c
index 69922557..16d4d3ef 100644
--- a/ddb/db_access.c
+++ b/ddb/db_access.c
@@ -62,15 +62,15 @@ static int db_extend[sizeof(int)+1] = { /* table for sign-extending */
};
db_expr_t
-db_get_task_value(addr, size, is_signed, task)
- db_addr_t addr;
- register int size;
- boolean_t is_signed;
- task_t task;
+db_get_task_value(
+ db_addr_t addr,
+ int size,
+ boolean_t is_signed,
+ task_t task)
{
char data[sizeof(db_expr_t)];
- register db_expr_t value;
- register int i;
+ db_expr_t value;
+ int i;
db_read_bytes(addr, size, data, task);
@@ -92,14 +92,14 @@ db_get_task_value(addr, size, is_signed, task)
}
void
-db_put_task_value(addr, size, value, task)
- db_addr_t addr;
- register int size;
- register db_expr_t value;
- task_t task;
+db_put_task_value(
+ db_addr_t addr,
+ int size,
+ db_expr_t value,
+ task_t task)
{
char data[sizeof(db_expr_t)];
- register int i;
+ int i;
#if BYTE_MSF
for (i = size - 1; i >= 0; i--)
@@ -115,19 +115,19 @@ db_put_task_value(addr, size, value, task)
}
db_expr_t
-db_get_value(addr, size, is_signed)
- db_addr_t addr;
- int size;
- boolean_t is_signed;
+db_get_value(
+ db_addr_t addr,
+ int size,
+ boolean_t is_signed)
{
return(db_get_task_value(addr, size, is_signed, TASK_NULL));
}
void
-db_put_value(addr, size, value)
- db_addr_t addr;
- int size;
- db_expr_t value;
+db_put_value(
+ db_addr_t addr,
+ int size,
+ db_expr_t value)
{
db_put_task_value(addr, size, value, TASK_NULL);
}
diff --git a/ddb/db_access.h b/ddb/db_access.h
index 6cedf29f..3bda5a4a 100644
--- a/ddb/db_access.h
+++ b/ddb/db_access.h
@@ -30,6 +30,10 @@
/*
* Data access functions for debugger.
*/
+
+#ifndef _DDB_DB_ACCESS_H_
+#define _DDB_DB_ACCESS_H_
+
#include <mach/boolean.h>
#include <machine/db_machdep.h>
#include <ddb/db_task_thread.h>
@@ -71,3 +75,5 @@ extern void db_put_task_value( db_addr_t addr,
int size,
db_expr_t value,
task_t task );
+
+#endif /* _DDB_DB_ACCESS_H_ */
diff --git a/ddb/db_aout.c b/ddb/db_aout.c
index 42fa6f75..d3f2e31e 100644
--- a/ddb/db_aout.c
+++ b/ddb/db_aout.c
@@ -39,6 +39,7 @@
#include <machine/db_machdep.h> /* data types */
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
+#include <ddb/db_aout.h>
#ifndef DB_NO_AOUT
@@ -69,18 +70,18 @@
ep = (struct nlist *)((char *)sp + *((int*)symtab)))
boolean_t
-aout_db_sym_init(symtab, esymtab, name, task_addr)
- char * symtab; /* pointer to start of symbol table */
- char * esymtab; /* pointer to end of string table,
+aout_db_sym_init(
+ char * symtab, /* pointer to start of symbol table */
+ char * esymtab, /* pointer to end of string table,
for checking - may be rounded up to
integer boundary */
- char * name;
- char * task_addr; /* use for this task only */
+ char * name,
+ char * task_addr) /* use for this task only */
{
- register struct nlist *sym_start, *sym_end;
- register struct nlist *sp;
- register char * strtab;
- register int strlen;
+ struct nlist *sym_start, *sym_end;
+ struct nlist *sp;
+ char * strtab;
+ int strlen;
char * estrtab;
db_get_aout_symtab(symtab, sym_start, sym_end);
@@ -100,7 +101,7 @@ aout_db_sym_init(symtab, esymtab, name, task_addr)
#undef round_to_size
for (sp = sym_start; sp < sym_end; sp++) {
- register long strx;
+ long strx;
strx = sp->n_un.n_strx;
if (strx != 0) {
if (strx > strlen) {
@@ -131,9 +132,9 @@ aout_db_sym_init(symtab, esymtab, name, task_addr)
/*
* check file name or not (check xxxx.x pattern)
*/
-private boolean_t
+private boolean_t __attribute__ ((pure))
aout_db_is_filename(name)
- register char *name;
+ const char *name;
{
while (*name) {
if (*name == '.') {
@@ -148,12 +149,12 @@ aout_db_is_filename(name)
/*
* special name comparison routine with a name in the symbol table entry
*/
-private boolean_t
+private boolean_t __attribute__ ((pure))
aout_db_eq_name(sp, name)
- struct nlist *sp;
- char *name;
+ const struct nlist *sp;
+ const char *name;
{
- register char *s1, *s2;
+ const char *s1, *s2;
s1 = sp->n_un.n_name;
s2 = name;
@@ -185,11 +186,11 @@ aout_db_eq_name(sp, name)
*/
private struct nlist *
aout_db_search_name(sp, ep, name, type, fp)
- register struct nlist *sp;
- struct nlist *ep;
- char *name;
- int type;
- struct nlist **fp;
+ struct nlist *sp;
+ const struct nlist *ep;
+ const char *name;
+ int type;
+ struct nlist **fp;
{
struct nlist *file_sp = *fp;
struct nlist *found_sp = 0;
@@ -232,11 +233,11 @@ aout_db_search_name(sp, ep, name, type, fp)
private db_sym_t
aout_db_qualified_search(stab, file, sym, line)
db_symtab_t *stab;
- char *file;
- char *sym;
+ const char *file;
+ const char *sym;
int line;
{
- register struct nlist *sp = (struct nlist *)stab->start;
+ struct nlist *sp = (struct nlist *)stab->start;
struct nlist *ep = (struct nlist *)stab->end;
struct nlist *fp = 0;
struct nlist *found_sp;
@@ -244,19 +245,19 @@ aout_db_qualified_search(stab, file, sym, line)
boolean_t in_file;
if (file == 0 && sym == 0)
- return(0);
+ return(DB_SYM_NULL);
if (file) {
if ((sp = aout_db_search_name(sp, ep, file, N_TEXT, &fp)) == 0)
- return(0);
+ return(DB_SYM_NULL);
}
if (sym) {
sp = aout_db_search_name(sp, ep, sym, (line > 0)? N_FUN: 0, &fp);
if (sp == 0)
- return(0);
+ return(DB_SYM_NULL);
}
if (line > 0) {
if (file && !aout_db_eq_name(fp, file))
- return(0);
+ return(DB_SYM_NULL);
found_sp = 0;
if (sp->n_type == N_FUN) {
/*
@@ -278,7 +279,7 @@ aout_db_qualified_search(stab, file, sym, line)
}
}
if (sp->n_type != N_SLINE || sp->n_value < func_top)
- return(0);
+ return(DB_SYM_NULL);
} else {
/*
* qualified by only file name
@@ -312,26 +313,23 @@ aout_db_qualified_search(stab, file, sym, line)
* lookup symbol by name
*/
db_sym_t
-aout_db_lookup(stab, symstr)
- db_symtab_t *stab;
- char * symstr;
+aout_db_lookup(
+ db_symtab_t *stab,
+ char * symstr)
{
- db_sym_t db_sym_parse_and_lookup();
-
return(db_sym_parse_and_lookup(aout_db_qualified_search, stab, symstr));
}
db_sym_t
-aout_db_search_symbol(symtab, off, strategy, diffp)
- db_symtab_t * symtab;
- register
- db_addr_t off;
- db_strategy_t strategy;
- db_expr_t *diffp; /* in/out */
+aout_db_search_symbol(
+ db_symtab_t * symtab,
+ db_addr_t off,
+ db_strategy_t strategy,
+ db_expr_t *diffp) /* in/out */
{
- register unsigned long diff = *diffp;
- register struct nlist *symp = 0;
- register struct nlist *sp, *ep;
+ unsigned long diff = *diffp;
+ struct nlist *symp = 0;
+ struct nlist *sp, *ep;
sp = (struct nlist *)symtab->start;
ep = (struct nlist *)symtab->end;
@@ -376,13 +374,13 @@ aout_db_search_symbol(symtab, off, strategy, diffp)
* Return the name and value for a symbol.
*/
void
-aout_db_symbol_values(stab, sym, namep, valuep)
- db_symtab_t *stab;
- db_sym_t sym;
- char **namep;
- db_expr_t *valuep;
+aout_db_symbol_values(
+ db_symtab_t *stab,
+ db_sym_t sym,
+ char **namep,
+ db_expr_t *valuep)
{
- register struct nlist *sp;
+ struct nlist *sp;
sp = (struct nlist *)sym;
if (namep)
@@ -398,16 +396,16 @@ aout_db_symbol_values(stab, sym, namep, valuep)
*/
private boolean_t
aout_db_search_by_addr(stab, addr, file, func, line, diff)
- db_symtab_t *stab;
- register vm_offset_t addr;
- char **file;
- char **func;
- int *line;
- unsigned long *diff;
+ const db_symtab_t *stab;
+ vm_offset_t addr;
+ char **file;
+ char **func;
+ int *line;
+ unsigned long *diff;
{
- register struct nlist *sp;
- register struct nlist *line_sp, *func_sp, *file_sp, *line_func;
- register vm_size_t func_diff, line_diff;
+ struct nlist *sp;
+ struct nlist *line_sp, *func_sp, *file_sp, *line_func;
+ vm_size_t func_diff, line_diff;
boolean_t found_line = FALSE;
struct nlist *ep = (struct nlist *)stab->end;
@@ -495,13 +493,13 @@ aout_db_line_at_pc(stab, sym, file, line, pc)
db_sym_t sym;
char **file;
int *line;
- db_expr_t pc;
+ db_addr_t pc;
{
char *func;
unsigned long diff;
boolean_t found;
- found = aout_db_search_by_addr(stab,(vm_offset_t)pc,file,&func,line,&diff);
+ found = aout_db_search_by_addr(stab, pc, file, &func, line, &diff);
return(found && func && *file);
}
diff --git a/ddb/db_aout.h b/ddb/db_aout.h
new file mode 100644
index 00000000..7c03d36d
--- /dev/null
+++ b/ddb/db_aout.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _DDB_DB_AOUT_H_
+#define _DDB_DB_AOUT_H_
+
+#include <ddb/db_sym.h>
+#include <machine/db_machdep.h>
+
+extern boolean_t
+aout_db_line_at_pc(
+ db_symtab_t *stab,
+ db_sym_t sym,
+ char **file,
+ int *line,
+ db_addr_t pc);
+
+extern db_sym_t
+aout_db_lookup(
+ db_symtab_t *stab,
+ char * symstr);
+
+extern db_sym_t
+aout_db_search_symbol(
+ db_symtab_t * symtab,
+ db_addr_t off,
+ db_strategy_t strategy,
+ db_expr_t *diffp);
+
+extern void
+aout_db_symbol_values(
+ db_symtab_t *stab,
+ db_sym_t sym,
+ char **namep,
+ db_expr_t *valuep);
+
+#endif /* _DDB_DB_AOUT_H_ */
diff --git a/ddb/db_break.c b/ddb/db_break.c
index 75253923..c3a9e181 100644
--- a/ddb/db_break.c
+++ b/ddb/db_break.c
@@ -36,6 +36,7 @@
*/
#include <mach/boolean.h>
#include <machine/db_machdep.h>
+#include <machine/db_interface.h>
#include <ddb/db_lex.h>
#include <ddb/db_break.h>
#include <ddb/db_access.h>
@@ -46,7 +47,6 @@
#include <ddb/db_output.h>
#include <ddb/db_cond.h>
#include <ddb/db_expr.h>
-#include <ddb/db_access.h>
#define NBREAKPOINTS 100
#define NTHREAD_LIST (NBREAKPOINTS*3)
@@ -64,7 +64,7 @@ static int db_breakpoint_number = 0;
db_breakpoint_t
db_breakpoint_alloc()
{
- register db_breakpoint_t bkpt;
+ db_breakpoint_t bkpt;
if ((bkpt = db_free_breakpoints) != 0) {
db_free_breakpoints = bkpt->link;
@@ -82,7 +82,7 @@ db_breakpoint_alloc()
void
db_breakpoint_free(bkpt)
- register db_breakpoint_t bkpt;
+ db_breakpoint_t bkpt;
{
bkpt->link = db_free_breakpoints;
db_free_breakpoints = bkpt;
@@ -90,11 +90,12 @@ db_breakpoint_free(bkpt)
static int
db_add_thread_breakpoint(bkpt, task_thd, count, task_bpt)
- register db_breakpoint_t bkpt;
+ const db_breakpoint_t bkpt;
vm_offset_t task_thd;
+ int count;
boolean_t task_bpt;
{
- register db_thread_breakpoint_t tp;
+ db_thread_breakpoint_t tp;
if (db_thread_break_init == FALSE) {
for (tp = db_thread_break_list;
@@ -120,12 +121,12 @@ db_add_thread_breakpoint(bkpt, task_thd, count, task_bpt)
}
static int
-db_delete_thread_breakpoint(bkpt, task_thd)
- register db_breakpoint_t bkpt;
- vm_offset_t task_thd;
+db_delete_thread_breakpoint(
+ db_breakpoint_t bkpt,
+ vm_offset_t task_thd)
{
- register db_thread_breakpoint_t tp;
- register db_thread_breakpoint_t *tpp;
+ db_thread_breakpoint_t tp;
+ db_thread_breakpoint_t *tpp;
if (task_thd == 0) {
/* delete all the thread-breakpoints */
@@ -153,13 +154,13 @@ db_delete_thread_breakpoint(bkpt, task_thd)
}
}
-static db_thread_breakpoint_t
+static db_thread_breakpoint_t __attribute__ ((pure))
db_find_thread_breakpoint(bkpt, thread)
- db_breakpoint_t bkpt;
- thread_t thread;
+ const db_breakpoint_t bkpt;
+ const thread_t thread;
{
- register db_thread_breakpoint_t tp;
- register task_t task = (thread == THREAD_NULL)? TASK_NULL: thread->task;
+ db_thread_breakpoint_t tp;
+ task_t task = (thread == THREAD_NULL)? TASK_NULL: thread->task;
for (tp = bkpt->threads; tp; tp = tp->tb_next) {
if (tp->tb_is_task) {
@@ -175,24 +176,24 @@ db_find_thread_breakpoint(bkpt, thread)
db_thread_breakpoint_t
db_find_thread_breakpoint_here(task, addr)
- task_t task;
+ const task_t task;
db_addr_t addr;
{
db_breakpoint_t bkpt;
- bkpt = db_find_breakpoint(task, (db_addr_t)addr);
+ bkpt = db_find_breakpoint(task, addr);
if (bkpt == 0)
return(0);
return(db_find_thread_breakpoint(bkpt, current_thread()));
}
db_thread_breakpoint_t
-db_find_breakpoint_number(num, bkptp)
- int num;
- db_breakpoint_t *bkptp;
+db_find_breakpoint_number(
+ int num,
+ db_breakpoint_t *bkptp)
{
- register db_thread_breakpoint_t tp;
- register db_breakpoint_t bkpt;
+ db_thread_breakpoint_t tp;
+ db_breakpoint_t bkpt;
for (bkpt = db_breakpoint_list; bkpt != 0; bkpt = bkpt->link) {
for (tp = bkpt->threads; tp; tp = tp->tb_next) {
@@ -207,10 +208,10 @@ db_find_breakpoint_number(num, bkptp)
}
static void
-db_force_delete_breakpoint(bkpt, task_thd, is_task)
- db_breakpoint_t bkpt;
- vm_offset_t task_thd;
- boolean_t is_task;
+db_force_delete_breakpoint(
+ db_breakpoint_t bkpt,
+ vm_offset_t task_thd,
+ boolean_t is_task)
{
db_printf("deleted a stale breakpoint at ");
if (bkpt->task == TASK_NULL || db_lookup_task(bkpt->task) >= 0)
@@ -226,10 +227,10 @@ db_force_delete_breakpoint(bkpt, task_thd, is_task)
}
void
-db_check_breakpoint_valid()
+db_check_breakpoint_valid(void)
{
- register db_thread_breakpoint_t tbp, tbp_next;
- register db_breakpoint_t bkpt, *bkptp;
+ db_thread_breakpoint_t tbp, tbp_next;
+ db_breakpoint_t bkpt, *bkptp;
bkptp = &db_breakpoint_list;
for (bkpt = *bkptp; bkpt; bkpt = *bkptp) {
@@ -267,13 +268,13 @@ db_check_breakpoint_valid()
db_breakpoint_t
db_set_breakpoint(task, addr, count, thread, task_bpt)
- task_t task;
+ const task_t task;
db_addr_t addr;
int count;
- thread_t thread;
+ const thread_t thread;
boolean_t task_bpt;
{
- register db_breakpoint_t bkpt;
+ db_breakpoint_t bkpt;
db_breakpoint_t alloc_bkpt = 0;
vm_offset_t task_thd;
@@ -320,12 +321,12 @@ db_set_breakpoint(task, addr, count, thread, task_bpt)
void
db_delete_breakpoint(task, addr, task_thd)
- task_t task;
+ const task_t task;
db_addr_t addr;
vm_offset_t task_thd;
{
- register db_breakpoint_t bkpt;
- register db_breakpoint_t *prev;
+ db_breakpoint_t bkpt;
+ db_breakpoint_t *prev;
for (prev = &db_breakpoint_list; (bkpt = *prev) != 0;
prev = &bkpt->link) {
@@ -349,12 +350,12 @@ db_delete_breakpoint(task, addr, task_thd)
}
}
-db_breakpoint_t
+db_breakpoint_t __attribute__ ((pure))
db_find_breakpoint(task, addr)
- task_t task;
+ const task_t task;
db_addr_t addr;
{
- register db_breakpoint_t bkpt;
+ db_breakpoint_t bkpt;
for (bkpt = db_breakpoint_list; bkpt != 0; bkpt = bkpt->link) {
if ((bkpt->task == task
@@ -367,10 +368,10 @@ db_find_breakpoint(task, addr)
boolean_t
db_find_breakpoint_here(task, addr)
- task_t task;
+ const task_t task;
db_addr_t addr;
{
- register db_breakpoint_t bkpt;
+ db_breakpoint_t bkpt;
for (bkpt = db_breakpoint_list; bkpt != 0; bkpt = bkpt->link) {
if ((bkpt->task == task
@@ -378,7 +379,7 @@ db_find_breakpoint_here(task, addr)
&& bkpt->address == addr)
return(TRUE);
if ((bkpt->flags & BKPT_USR_GLOBAL) == 0 &&
- DB_PHYS_EQ(task, (vm_offset_t)addr, bkpt->task, (vm_offset_t)bkpt->address))
+ DB_PHYS_EQ(task, addr, bkpt->task, bkpt->address))
return (TRUE);
}
return(FALSE);
@@ -389,8 +390,8 @@ boolean_t db_breakpoints_inserted = TRUE;
void
db_set_breakpoints(void)
{
- register db_breakpoint_t bkpt;
- register task_t task;
+ db_breakpoint_t bkpt;
+ task_t task;
db_expr_t inst;
task_t cur_task;
@@ -434,8 +435,8 @@ db_set_breakpoints(void)
void
db_clear_breakpoints(void)
{
- register db_breakpoint_t bkpt, *bkptp;
- register task_t task;
+ db_breakpoint_t bkpt, *bkptp;
+ task_t task;
task_t cur_task;
db_expr_t inst;
@@ -481,11 +482,11 @@ db_clear_breakpoints(void)
* so the breakpoint does not have to be on the breakpoint list.
*/
db_breakpoint_t
-db_set_temp_breakpoint(task, addr)
- task_t task;
- db_addr_t addr;
+db_set_temp_breakpoint(
+ task_t task,
+ db_addr_t addr)
{
- register db_breakpoint_t bkpt;
+ db_breakpoint_t bkpt;
bkpt = db_breakpoint_alloc();
if (bkpt == 0) {
@@ -510,9 +511,9 @@ db_set_temp_breakpoint(task, addr)
}
void
-db_delete_temp_breakpoint(task, bkpt)
- task_t task;
- db_breakpoint_t bkpt;
+db_delete_temp_breakpoint(
+ task_t task,
+ db_breakpoint_t bkpt)
{
db_put_task_value(bkpt->address, BKPT_SIZE, bkpt->bkpt_inst, task);
db_delete_thread_breakpoint(bkpt, 0);
@@ -523,9 +524,9 @@ db_delete_temp_breakpoint(task, bkpt)
* List breakpoints.
*/
void
-db_list_breakpoints()
+db_list_breakpoints(void)
{
- register db_breakpoint_t bkpt;
+ db_breakpoint_t bkpt;
if (db_breakpoint_list == 0) {
db_printf("No breakpoints set\n");
@@ -537,9 +538,9 @@ db_list_breakpoints()
bkpt != 0;
bkpt = bkpt->link)
{
- register db_thread_breakpoint_t tp;
- int task_id;
- int thread_id;
+ db_thread_breakpoint_t tp;
+ int task_id;
+ int thread_id;
if (bkpt->threads) {
for (tp = bkpt->threads; tp; tp = tp->tb_next) {
@@ -597,9 +598,9 @@ db_list_breakpoints()
/* Delete breakpoint */
/*ARGSUSED*/
void
-db_delete_cmd()
+db_delete_cmd(void)
{
- register int n;
+ int n;
thread_t thread;
vm_offset_t task_thd;
boolean_t user_global = FALSE;
@@ -680,9 +681,9 @@ db_breakpoint_cmd(addr, have_addr, count, modif)
db_expr_t addr;
int have_addr;
db_expr_t count;
- char * modif;
+ const char * modif;
{
- register int n;
+ int n;
thread_t thread;
boolean_t user_global = db_option(modif, 'U');
boolean_t task_bpt = db_option(modif, 'T');
@@ -730,7 +731,7 @@ db_breakpoint_cmd(addr, have_addr, count, modif)
/* list breakpoints */
void
-db_listbreak_cmd()
+db_listbreak_cmd(void)
{
db_list_breakpoints();
}
diff --git a/ddb/db_break.h b/ddb/db_break.h
index 89e78894..610af2f8 100644
--- a/ddb/db_break.h
+++ b/ddb/db_break.h
@@ -71,12 +71,12 @@ struct db_breakpoint {
typedef struct db_breakpoint *db_breakpoint_t;
-extern db_breakpoint_t db_find_breakpoint( task_t task, db_addr_t addr);
-extern boolean_t db_find_breakpoint_here( task_t task, db_addr_t addr);
+extern db_breakpoint_t db_find_breakpoint( const task_t task, db_addr_t addr) __attribute__ ((pure));
+extern boolean_t db_find_breakpoint_here( const task_t task, db_addr_t addr);
extern void db_set_breakpoints(void);
extern void db_clear_breakpoints(void);
extern db_thread_breakpoint_t db_find_thread_breakpoint_here
- ( task_t task, db_addr_t addr );
+ ( const task_t task, db_addr_t addr );
extern db_thread_breakpoint_t db_find_breakpoint_number
( int num, db_breakpoint_t *bkptp);
@@ -84,18 +84,20 @@ extern db_breakpoint_t db_set_temp_breakpoint( task_t task, db_addr_t addr);
extern void db_delete_temp_breakpoint
( task_t task, db_breakpoint_t bkpt);
-extern db_breakpoint_t db_set_breakpoint(task_t task, db_addr_t addr,
- int count, thread_t thread,
+extern db_breakpoint_t db_set_breakpoint(const task_t task, db_addr_t addr,
+ int count, const thread_t thread,
boolean_t task_bpt);
-void db_listbreak_cmd();
+void db_listbreak_cmd(void);
-void db_delete_cmd();
+void db_delete_cmd(void);
void db_breakpoint_cmd(
db_expr_t addr,
int have_addr,
db_expr_t count,
- char * modif);
+ const char * modif);
+
+extern void db_check_breakpoint_valid(void);
#endif /* _DDB_DB_BREAK_H_ */
diff --git a/ddb/db_command.c b/ddb/db_command.c
index cb14da82..721f04fe 100644
--- a/ddb/db_command.c
+++ b/ddb/db_command.c
@@ -54,8 +54,10 @@
#include <ddb/db_cond.h>
#include <machine/setjmp.h>
+#include <machine/db_interface.h>
#include <kern/debug.h>
#include <kern/thread.h>
+#include <kern/slab.h>
#include <ipc/ipc_pset.h> /* 4proto */
#include <ipc/ipc_port.h> /* 4proto */
@@ -94,17 +96,17 @@ boolean_t db_ed_style = TRUE;
*/
int
db_cmd_search(name, table, cmdp)
- char * name;
- struct db_command *table;
- struct db_command **cmdp; /* out */
+ const char * name;
+ const struct db_command *table;
+ const struct db_command **cmdp; /* out */
{
- struct db_command *cmd;
+ const struct db_command *cmd;
int result = CMD_NONE;
for (cmd = table; cmd->name != 0; cmd++) {
- register char *lp;
- register char *rp;
- register int c;
+ const char *lp;
+ char *rp;
+ int c;
lp = name;
rp = cmd->name;
@@ -141,9 +143,9 @@ db_cmd_search(name, table, cmdp)
void
db_cmd_list(table)
- struct db_command *table;
+ const struct db_command *table;
{
- register struct db_command *cmd;
+ const struct db_command *cmd;
for (cmd = table; cmd->name != 0; cmd++) {
db_printf("%-12s", cmd->name);
@@ -152,9 +154,9 @@ db_cmd_list(table)
}
void
-db_command(last_cmdp, cmd_table)
- struct db_command **last_cmdp; /* IN_OUT */
- struct db_command *cmd_table;
+db_command(
+ struct db_command **last_cmdp, /* IN_OUT */
+ struct db_command *cmd_table)
{
struct db_command *cmd;
int t;
@@ -175,7 +177,6 @@ db_command(last_cmdp, cmd_table)
db_unread_token(t);
}
else if (t == tEXCL) {
- void db_fncall();
db_fncall();
return;
}
@@ -293,19 +294,18 @@ db_command(last_cmdp, cmd_table)
}
void
-db_command_list(last_cmdp, cmd_table)
- struct db_command **last_cmdp; /* IN_OUT */
- struct db_command *cmd_table;
+db_command_list(
+ struct db_command **last_cmdp, /* IN_OUT */
+ struct db_command *cmd_table)
{
- void db_skip_to_eol();
-
do {
db_command(last_cmdp, cmd_table);
db_skip_to_eol();
- } while (db_read_token() == tSEMI_COLON && db_cmd_loop_done == 0);
+ } while (db_read_token() == tSEMI_COLON && db_cmd_loop_done == FALSE);
}
struct db_command db_show_all_cmds[] = {
+ { "tasks", db_show_all_tasks, 0, 0 },
{ "threads", db_show_all_threads, 0, 0 },
{ "slocks", db_show_all_slocks, 0, 0 },
{ (char *)0 }
@@ -328,13 +328,10 @@ struct db_command db_show_cmds[] = {
{ "kmsg", ipc_kmsg_print, 0, 0 },
{ "msg", ipc_msg_print, 0, 0 },
{ "ipc_port", db_show_port_id, 0, 0 },
+ { "slabinfo", db_show_slab_info, 0, 0 },
{ (char *)0, }
};
-void db_help_cmd();
-extern void db_stack_trace_cmd();
-extern void db_reset_cpu();
-
struct db_command db_command_table[] = {
#ifdef DB_MACHINE_COMMANDS
/* this must be the first entry, if it exists */
@@ -369,6 +366,7 @@ struct db_command db_command_table[] = {
{ "show", 0, 0, db_show_cmds },
{ "reset", db_reset_cpu, 0, 0 },
{ "reboot", db_reset_cpu, 0, 0 },
+ { "halt", db_halt_cpu, 0, 0 },
{ (char *)0, }
};
@@ -376,20 +374,19 @@ struct db_command db_command_table[] = {
/* this function should be called to install the machine dependent
commands. It should be called before the debugger is enabled */
-void db_machine_commands_install(ptr)
-struct db_command *ptr;
+void db_machine_commands_install(struct db_command *ptr)
{
db_command_table[0].more = ptr;
return;
}
-#endif
+#endif /* DB_MACHINE_COMMANDS */
struct db_command *db_last_command = 0;
void
-db_help_cmd()
+db_help_cmd(void)
{
struct db_command *cmd = db_command_table;
@@ -400,8 +397,6 @@ db_help_cmd()
}
}
-int (*ddb_display)();
-
void
db_command_loop(void)
{
@@ -416,10 +411,7 @@ db_command_loop(void)
db_prev = db_dot;
db_next = db_dot;
- if (ddb_display)
- (*ddb_display)();
-
- db_cmd_loop_done = 0;
+ db_cmd_loop_done = FALSE;
while (!db_cmd_loop_done) {
(void) _setjmp(db_recover = &db_jmpbuf);
db_macro_level = 0;
@@ -440,13 +432,13 @@ db_command_loop(void)
}
boolean_t
-db_exec_cmd_nest(cmd, size)
- char *cmd;
- int size;
+db_exec_cmd_nest(
+ char *cmd,
+ int size)
{
struct db_lex_context lex_context;
- db_cmd_loop_done = 0;
+ db_cmd_loop_done = FALSE;
if (cmd) {
db_save_lex_context(&lex_context);
db_switch_input(cmd, size /**OLD, &lex_context OLD**/);
@@ -454,11 +446,11 @@ db_exec_cmd_nest(cmd, size)
db_command_list(&db_last_command, db_command_table);
if (cmd)
db_restore_lex_context(&lex_context);
- return(db_cmd_loop_done == 0);
+ return(db_cmd_loop_done == FALSE);
}
void db_error(s)
- char *s;
+ const char *s;
{
extern int db_macro_level;
@@ -482,7 +474,7 @@ void db_error(s)
* !expr(arg,arg,arg)
*/
void
-db_fncall()
+db_fncall(void)
{
db_expr_t fn_addr;
#define MAXARGS 11
@@ -533,12 +525,12 @@ db_fncall()
db_printf(" %#N\n", retval);
}
-boolean_t
+boolean_t __attribute__ ((pure))
db_option(modif, option)
- char *modif;
- int option;
+ const char *modif;
+ int option;
{
- register char *p;
+ const char *p;
for (p = modif; *p; p++)
if (*p == option)
diff --git a/ddb/db_command.h b/ddb/db_command.h
index 3ed1fb90..4208bda8 100644
--- a/ddb/db_command.h
+++ b/ddb/db_command.h
@@ -28,6 +28,9 @@
* Date: 7/90
*/
+#ifndef _DDB_DB_COMMAND_H_
+#define _DDB_DB_COMMAND_H_
+
#if MACH_KDB
/*
@@ -38,9 +41,9 @@
#include <machine/setjmp.h>
extern void db_command_loop(void);
-extern boolean_t db_option(char *, int);
+extern boolean_t db_option(const char *, int) __attribute__ ((pure));
-extern void db_error(char *); /* report error */
+extern void db_error(const char *) __attribute__ ((noreturn)); /* report error */
extern db_addr_t db_dot; /* current location */
extern db_addr_t db_last_addr; /* last explicit address typed */
@@ -50,8 +53,6 @@ extern db_addr_t db_next; /* next address to be examined
or written */
extern jmp_buf_t * db_recover; /* error recovery */
-extern jmp_buf_t * db_recover; /* error recovery */
-
/*
* Command table
*/
@@ -68,6 +69,10 @@ struct db_command {
extern boolean_t db_exec_cmd_nest(char *cmd, int size);
-void db_fncall();
+void db_fncall(void);
+
+void db_help_cmd(void);
#endif /* MACH_KDB */
+
+#endif /* _DDB_DB_COMMAND_H_ */
diff --git a/ddb/db_cond.c b/ddb/db_cond.c
index 60ea4735..31e1d241 100644
--- a/ddb/db_cond.c
+++ b/ddb/db_cond.c
@@ -48,8 +48,7 @@ struct db_cond {
} db_cond[DB_MAX_COND];
void
-db_cond_free(bkpt)
- db_thread_breakpoint_t bkpt;
+db_cond_free(db_thread_breakpoint_t bkpt)
{
if (bkpt->tb_cond > 0) {
db_cond[bkpt->tb_cond-1].c_size = 0;
@@ -59,10 +58,9 @@ db_cond_free(bkpt)
}
boolean_t
-db_cond_check(bkpt)
- db_thread_breakpoint_t bkpt;
+db_cond_check(db_thread_breakpoint_t bkpt)
{
- register struct db_cond *cp;
+ struct db_cond *cp;
db_expr_t value;
int t;
jmp_buf_t db_jmpbuf;
@@ -105,10 +103,10 @@ db_cond_check(bkpt)
void
db_cond_print(bkpt)
- db_thread_breakpoint_t bkpt;
+ const db_thread_breakpoint_t bkpt;
{
- register char *p, *ep;
- register struct db_cond *cp;
+ char *p, *ep;
+ struct db_cond *cp;
if (bkpt->tb_cond <= 0)
return;
@@ -123,11 +121,11 @@ db_cond_print(bkpt)
}
void
-db_cond_cmd()
+db_cond_cmd(void)
{
- register int c;
- register struct db_cond *cp;
- register char *p;
+ int c;
+ struct db_cond *cp;
+ char *p;
db_expr_t value;
db_thread_breakpoint_t bkpt;
diff --git a/ddb/db_cond.h b/ddb/db_cond.h
index dec4967d..6b9c3a5b 100644
--- a/ddb/db_cond.h
+++ b/ddb/db_cond.h
@@ -24,7 +24,7 @@
#include <sys/types.h>
#include <machine/db_machdep.h>
-extern void db_cond_free (db_thread_breakpoint_t bkpt);
+extern void db_cond_free (const db_thread_breakpoint_t bkpt);
extern boolean_t db_cond_check (db_thread_breakpoint_t bkpt);
diff --git a/ddb/db_elf.c b/ddb/db_elf.c
new file mode 100644
index 00000000..10e71621
--- /dev/null
+++ b/ddb/db_elf.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2014 Free Software Foundation, Inc.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2, or (at
+ * your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+/*
+ * Mach Operating System
+ * Copyright (c) 1991,1990 Carnegie Mellon University
+ * All Rights Reserved.
+ *
+ * Permission to use, copy, modify and distribute this software and its
+ * documentation is hereby granted, provided that both the copyright
+ * notice and this permission notice appear in all copies of the
+ * software, derivative works or modified versions, and any portions
+ * thereof, and that both notices appear in supporting documentation.
+ *
+ * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
+ * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
+ * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
+ *
+ * Carnegie Mellon requests users of this software to return to
+ *
+ * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
+ * School of Computer Science
+ * Carnegie Mellon University
+ * Pittsburgh PA 15213-3890
+ *
+ * any improvements or extensions that they make and grant Carnegie Mellon
+ * the rights to redistribute these changes.
+ */
+/*
+ * Author: David B. Golub, Carnegie Mellon University
+ * Date: 7/90
+ */
+
+#if MACH_KDB
+
+/*
+ * Symbol table routines for ELF format files.
+ */
+
+#include <string.h>
+#include <mach/std_types.h>
+#include <mach/exec/elf.h>
+#include <machine/db_machdep.h> /* data types */
+#include <machine/vm_param.h>
+#include <ddb/db_output.h>
+#include <ddb/db_sym.h>
+#include <ddb/db_elf.h>
+
+#ifndef DB_NO_ELF
+
+struct db_symtab_elf {
+ int type;
+ Elf32_Sym *start;
+ Elf32_Sym *end;
+ char *strings;
+ char *map_pointer; /* symbols are for this map only,
+ if not null */
+ char name[SYMTAB_NAME_LEN];
+ /* symtab name */
+};
+
+boolean_t
+elf_db_sym_init (unsigned shdr_num,
+ vm_size_t shdr_size,
+ vm_offset_t shdr_addr,
+ unsigned shdr_shndx,
+ char *name,
+ char *task_addr)
+{
+ Elf32_Shdr *shdr, *symtab, *strtab;
+ const char *shstrtab;
+ int i;
+
+ if (shdr_num == 0)
+ return FALSE;
+
+ if (shdr_size != sizeof *shdr)
+ return FALSE;
+
+ shdr = (Elf32_Shdr *) shdr_addr;
+
+ if (shdr[shdr_shndx].sh_type != SHT_STRTAB)
+ return FALSE;
+
+ shstrtab = (const char *) phystokv (shdr[shdr_shndx].sh_addr);
+
+ symtab = strtab = NULL;
+ for (i = 0; i < shdr_num; i++)
+ switch (shdr[i].sh_type) {
+ case SHT_SYMTAB:
+ if (symtab)
+ db_printf ("Ignoring additional ELF symbol table at %d\n", i);
+ else
+ symtab = &shdr[i];
+ break;
+
+ case SHT_STRTAB:
+ if (strcmp (&shstrtab[shdr[i].sh_name], ".strtab") == 0) {
+ if (strtab)
+ db_printf ("Ignoring additional ELF string table at %d\n", i);
+ else
+ strtab = &shdr[i];
+ }
+ break;
+ }
+
+ if (symtab == NULL || strtab == NULL)
+ return FALSE;
+
+ if (db_add_symbol_table (SYMTAB_ELF,
+ (char *) phystokv (symtab->sh_addr),
+ (char *) phystokv (symtab->sh_addr)+symtab->sh_size,
+ name,
+ (char *) phystokv (strtab->sh_addr),
+ task_addr)) {
+ db_printf ("Loaded ELF symbol table for %s (%d symbols)\n",
+ name, symtab->sh_size / sizeof (Elf32_Sym));
+ return TRUE;
+ }
+
+ return FALSE;
+}
+
+/*
+ * lookup symbol by name
+ */
+db_sym_t
+elf_db_lookup (db_symtab_t *stab,
+ char *symstr)
+{
+ struct db_symtab_elf *self = (struct db_symtab_elf *) stab;
+ Elf32_Sym *s;
+
+ for (s = self->start; s < self->end; s++)
+ if (strcmp (symstr, &self->strings[s->st_name]) == 0)
+ return (db_sym_t) s;
+
+ return NULL;
+}
+
+db_sym_t
+elf_db_search_symbol (db_symtab_t *stab,
+ db_addr_t off,
+ db_strategy_t strategy,
+ db_expr_t *diffp) /* in/out */
+{
+ struct db_symtab_elf *self = (struct db_symtab_elf *) stab;
+ unsigned long diff = *diffp;
+ Elf32_Sym *s, *symp = NULL;
+
+ for (s = self->start; s < self->end; s++) {
+ if (s->st_name == 0)
+ continue;
+
+ if (strategy == DB_STGY_XTRN && (s->st_info & STB_GLOBAL) == 0)
+ continue;
+
+ if (off >= s->st_value) {
+ if (s->st_info == STT_FUNC)
+ continue;
+
+ if (off - s->st_value < diff) {
+ diff = off - s->st_value;
+ symp = s;
+ if (diff == 0 && (s->st_info & STB_GLOBAL))
+ break;
+ } else if (off - s->st_value == diff) {
+ if (symp == NULL)
+ symp = s;
+ else if ((symp->st_info & STB_GLOBAL) == 0
+ && (s->st_info & STB_GLOBAL) != 0)
+ symp = s; /* pick the external symbol */
+ }
+ }
+ }
+
+ if (symp == NULL)
+ *diffp = off;
+ else
+ *diffp = diff;
+
+ return (db_sym_t) symp;
+}
+
+/*
+ * Return the name and value for a symbol.
+ */
+void
+elf_db_symbol_values (db_symtab_t *stab,
+ db_sym_t sym,
+ char **namep,
+ db_expr_t *valuep)
+{
+ struct db_symtab_elf *self = (struct db_symtab_elf *) stab;
+ Elf32_Sym *s = (Elf32_Sym *) sym;
+
+ if (namep)
+ *namep = &self->strings[s->st_name];
+ if (valuep)
+ *valuep = s->st_value;
+}
+
+/*
+ * Find filename and lineno within, given the current pc.
+ */
+boolean_t
+elf_db_line_at_pc (db_symtab_t *stab,
+ db_sym_t sym,
+ char **file,
+ int *line,
+ db_addr_t pc)
+{
+ /* XXX Parse DWARF information. */
+ return FALSE;
+}
+
+#endif /* DB_NO_ELF */
+
+#endif /* MACH_KDB */
diff --git a/ddb/db_elf.h b/ddb/db_elf.h
new file mode 100644
index 00000000..12b82868
--- /dev/null
+++ b/ddb/db_elf.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _DDB_DB_ELF_H_
+#define _DDB_DB_ELF_H_
+
+#include <ddb/db_sym.h>
+#include <machine/db_machdep.h>
+
+extern boolean_t
+elf_db_line_at_pc(
+ db_symtab_t *stab,
+ db_sym_t sym,
+ char **file,
+ int *line,
+ db_addr_t pc);
+
+extern db_sym_t
+elf_db_lookup(
+ db_symtab_t *stab,
+ char * symstr);
+
+extern db_sym_t
+elf_db_search_symbol(
+ db_symtab_t * symtab,
+ db_addr_t off,
+ db_strategy_t strategy,
+ db_expr_t *diffp);
+
+extern void
+elf_db_symbol_values(
+ db_symtab_t *stab,
+ db_sym_t sym,
+ char **namep,
+ db_expr_t *valuep);
+
+#endif /* _DDB_DB_ELF_H_ */
diff --git a/ddb/db_examine.c b/ddb/db_examine.c
index 96c5eee1..836b0e89 100644
--- a/ddb/db_examine.c
+++ b/ddb/db_examine.c
@@ -53,9 +53,6 @@ int db_examine_count = 1;
db_addr_t db_examine_prev_addr = 0;
thread_t db_examine_thread = THREAD_NULL;
-extern db_addr_t db_disasm(db_addr_t pc, boolean_t altform, task_t task);
- /* instruction disassembler */
-
/*
* Examine (print) data.
*/
@@ -65,10 +62,9 @@ db_examine_cmd(addr, have_addr, count, modif)
db_expr_t addr;
int have_addr;
db_expr_t count;
- char * modif;
+ const char * modif;
{
thread_t thread;
- boolean_t db_option();
if (modif[0] != '\0')
db_strcpy(db_examine_format, modif);
@@ -82,7 +78,7 @@ db_examine_cmd(addr, have_addr, count, modif)
return;
}
else
- if (db_option(modif,'u'))
+ if (db_option(modif, 'u'))
thread = current_thread();
else
thread = THREAD_NULL;
@@ -98,7 +94,7 @@ db_examine_forward(addr, have_addr, count, modif)
db_expr_t addr;
int have_addr;
db_expr_t count;
- char * modif;
+ const char * modif;
{
db_examine(db_next, db_examine_format, db_examine_count,
db_thread_to_task(db_examine_thread));
@@ -110,7 +106,7 @@ db_examine_backward(addr, have_addr, count, modif)
db_expr_t addr;
int have_addr;
db_expr_t count;
- char * modif;
+ const char * modif;
{
db_examine(db_examine_prev_addr - (db_next - db_examine_prev_addr),
@@ -120,9 +116,8 @@ db_examine_backward(addr, have_addr, count, modif)
void
db_examine(addr, fmt, count, task)
- register
db_addr_t addr;
- char * fmt; /* format string */
+ const char * fmt; /* format string */
int count; /* repeat count */
task_t task;
{
@@ -130,7 +125,7 @@ db_examine(addr, fmt, count, task)
db_expr_t value;
int size; /* in bytes */
int width;
- char * fp;
+ const char * fp;
db_examine_prev_addr = addr;
while (--count >= 0) {
@@ -163,7 +158,7 @@ db_examine(addr, fmt, count, task)
db_printf(":\t");
break;
case 'm':
- db_next = db_xcdump(addr, size, count+1, task);
+ db_next = db_xcdump(addr, size, count + 1, task);
return;
default:
if (db_print_position() == 0) {
@@ -171,7 +166,7 @@ db_examine(addr, fmt, count, task)
char * name;
db_addr_t off;
- db_find_task_sym_and_offset(addr,&name,&off,task);
+ db_find_task_sym_and_offset(addr, &name, &off, task);
if (off == 0)
db_printf("%s:\t", name);
else
@@ -260,7 +255,7 @@ char db_print_format = 'x';
/*ARGSUSED*/
void
-db_print_cmd()
+db_print_cmd(void)
{
db_expr_t value;
int t;
@@ -326,9 +321,9 @@ db_print_cmd()
}
void
-db_print_loc_and_inst(loc, task)
- db_addr_t loc;
- task_t task;
+db_print_loc_and_inst(
+ db_addr_t loc,
+ task_t task)
{
db_task_printsym(loc, DB_STGY_PROC, task);
db_printf(":\t");
@@ -337,20 +332,19 @@ db_print_loc_and_inst(loc, task)
void
db_strcpy(dst, src)
- register char *dst;
- register char *src;
+ char *dst;
+ const char *src;
{
while ((*dst++ = *src++))
;
}
-void db_search(); /*forward*/
/*
* Search for a value in memory.
* Syntax: search [/bhl] addr value [mask] [,count] [thread]
*/
void
-db_search_cmd()
+db_search_cmd(void)
{
int t;
db_addr_t addr;
@@ -360,7 +354,7 @@ db_search_cmd()
db_addr_t count;
thread_t thread;
boolean_t thread_flag = FALSE;
- register char *p;
+ char *p;
t = db_read_token();
if (t == tSLASH) {
@@ -395,7 +389,7 @@ db_search_cmd()
size = sizeof(int);
}
- if (!db_expression(&addr)) {
+ if (!db_expression((db_expr_t *)&addr)) {
db_printf("Address missing\n");
db_flush_lex();
return;
@@ -412,7 +406,7 @@ db_search_cmd()
t = db_read_token();
if (t == tCOMMA) {
- if (!db_expression(&count)) {
+ if (!db_expression((db_expr_t *)&count)) {
db_printf("Count missing\n");
db_flush_lex();
return;
@@ -431,18 +425,17 @@ db_search_cmd()
}
void
-db_search(addr, size, value, mask, count, task)
- register
- db_addr_t addr;
- int size;
- db_expr_t value;
- db_expr_t mask;
- unsigned int count;
- task_t task;
+db_search(
+ db_addr_t addr,
+ int size,
+ db_expr_t value,
+ db_expr_t mask,
+ unsigned int count,
+ task_t task)
{
while (count-- != 0) {
db_prev = addr;
- if ((db_get_task_value(addr,size,FALSE,task) & mask) == value)
+ if ((db_get_task_value(addr, size, FALSE, task) & mask) == value)
break;
addr += size;
}
@@ -452,13 +445,13 @@ db_search(addr, size, value, mask, count, task)
#define DB_XCDUMP_NC 16
int
-db_xcdump(addr, size, count, task)
- db_addr_t addr;
- int size;
- int count;
- task_t task;
+db_xcdump(
+ db_addr_t addr,
+ int size,
+ int count,
+ task_t task)
{
- register int i, n;
+ int i, n;
db_expr_t value;
int bcount;
db_addr_t off;
diff --git a/ddb/db_examine.h b/ddb/db_examine.h
index e1fb1eee..df578a02 100644
--- a/ddb/db_examine.h
+++ b/ddb/db_examine.h
@@ -29,13 +29,13 @@ extern void db_examine_cmd (
db_expr_t addr,
int have_addr,
db_expr_t count,
- char *modif);
+ const char *modif);
-extern void db_strcpy (char *dst, char *src);
+extern void db_strcpy (char *dst, const char *src);
extern void db_examine (
db_addr_t addr,
- char *fmt,
+ const char *fmt,
int count,
task_t task);
@@ -43,13 +43,13 @@ void db_examine_forward(
db_expr_t addr,
int have_addr,
db_expr_t count,
- char * modif);
+ const char * modif);
void db_examine_backward(
db_expr_t addr,
int have_addr,
db_expr_t count,
- char * modif);
+ const char * modif);
extern void db_print_loc_and_inst (
db_addr_t loc,
@@ -61,8 +61,22 @@ int db_xcdump(
int count,
task_t task);
-void db_print_cmd();
+void db_print_cmd(void);
-void db_search_cmd();
+void db_search_cmd(void);
+
+void db_search(
+ db_addr_t addr,
+ int size,
+ db_expr_t value,
+ db_expr_t mask,
+ unsigned int count,
+ task_t task);
+
+/* instruction disassembler */
+extern db_addr_t db_disasm(
+ db_addr_t pc,
+ boolean_t altform,
+ task_t task);
#endif /* _DDB_DB_EXAMINE_H_ */
diff --git a/ddb/db_expr.c b/ddb/db_expr.c
index 611baa09..c9e6752a 100644
--- a/ddb/db_expr.c
+++ b/ddb/db_expr.c
@@ -41,10 +41,8 @@
#include <ddb/db_variables.h>
#include <kern/task.h>
-
boolean_t
-db_term(valuep)
- db_expr_t *valuep;
+db_term(db_expr_t *valuep)
{
int t;
@@ -95,12 +93,12 @@ db_term(valuep)
int
db_size_option(modif, u_option, t_option)
- char *modif;
+ const char *modif;
boolean_t *u_option;
boolean_t *t_option;
{
- register char *p;
- int size = sizeof(int);
+ const char *p;
+ int size = sizeof(int);
*u_option = FALSE;
*t_option = FALSE;
@@ -127,8 +125,7 @@ db_size_option(modif, u_option, t_option)
}
boolean_t
-db_unary(valuep)
- db_expr_t *valuep;
+db_unary(db_expr_t *valuep)
{
int t;
int size;
@@ -177,10 +174,9 @@ db_unary(valuep)
}
boolean_t
-db_mult_expr(valuep)
- db_expr_t *valuep;
+db_mult_expr(db_expr_t *valuep)
{
- db_expr_t lhs, rhs;
+ db_expr_t lhs = 0, rhs;
int t;
char c;
@@ -223,8 +219,7 @@ db_mult_expr(valuep)
}
boolean_t
-db_add_expr(valuep)
- db_expr_t *valuep;
+db_add_expr(db_expr_t *valuep)
{
db_expr_t lhs, rhs;
int t;
@@ -255,8 +250,7 @@ db_add_expr(valuep)
}
boolean_t
-db_shift_expr(valuep)
- db_expr_t *valuep;
+db_shift_expr(db_expr_t *valuep)
{
db_expr_t lhs, rhs;
int t;
@@ -290,8 +284,7 @@ db_shift_expr(valuep)
}
boolean_t
-db_logical_relation_expr(valuep)
- db_expr_t *valuep;
+db_logical_relation_expr(db_expr_t *valuep)
{
db_expr_t lhs, rhs;
int t;
@@ -340,8 +333,7 @@ db_logical_relation_expr(valuep)
}
boolean_t
-db_logical_and_expr(valuep)
- db_expr_t *valuep;
+db_logical_and_expr(db_expr_t *valuep)
{
db_expr_t lhs, rhs;
int t;
@@ -363,8 +355,7 @@ db_logical_and_expr(valuep)
}
boolean_t
-db_logical_or_expr(valuep)
- db_expr_t *valuep;
+db_logical_or_expr(db_expr_t *valuep)
{
db_expr_t lhs, rhs;
int t;
@@ -386,8 +377,7 @@ db_logical_or_expr(valuep)
}
int
-db_expression(valuep)
- db_expr_t *valuep;
+db_expression(db_expr_t *valuep)
{
return (db_logical_or_expr(valuep));
}
diff --git a/ddb/db_expr.h b/ddb/db_expr.h
index 989b66be..9c304e69 100644
--- a/ddb/db_expr.h
+++ b/ddb/db_expr.h
@@ -17,7 +17,7 @@
*/
int db_size_option(
- char *modif,
+ const char *modif,
boolean_t *u_option,
boolean_t *t_option);
diff --git a/ddb/db_ext_symtab.c b/ddb/db_ext_symtab.c
index 9831a01c..cafb0c4c 100644
--- a/ddb/db_ext_symtab.c
+++ b/ddb/db_ext_symtab.c
@@ -46,12 +46,12 @@
* the caller and the kernel debugger agree on its format.
*/
kern_return_t
-host_load_symbol_table(host, task, name, symtab, symtab_count)
- host_t host;
- task_t task;
- char * name;
- pointer_t symtab;
- unsigned int symtab_count;
+host_load_symbol_table(
+ host_t host,
+ task_t task,
+ char * name,
+ pointer_t symtab,
+ unsigned int symtab_count)
{
kern_return_t result;
vm_offset_t symtab_start;
diff --git a/ddb/db_input.c b/ddb/db_input.c
index 6e7fa00b..6b6db764 100644
--- a/ddb/db_input.c
+++ b/ddb/db_input.c
@@ -69,8 +69,8 @@ char * db_history_prev = (char *) 0; /* start of previous line */
void
db_putstring(s, count)
- char *s;
- int count;
+ const char *s;
+ int count;
{
while (--count >= 0)
cnputc(*s++);
@@ -91,11 +91,11 @@ db_putnchars(c, count)
#define DEL_FWD 0
#define DEL_BWD 1
void
-db_delete(n, bwd)
- int n;
- int bwd;
+db_delete(
+ int n,
+ int bwd)
{
- register char *p;
+ char *p;
if (bwd) {
db_lc -= n;
@@ -111,7 +111,7 @@ db_delete(n, bwd)
}
void
-db_delete_line()
+db_delete_line(void)
{
db_delete(db_le - db_lc, DEL_FWD);
db_delete(db_lc - db_lbuf_start, DEL_BWD);
@@ -133,12 +133,11 @@ db_delete_line()
db_history_curr = db_history + \
db_history_size - 1; \
} while (0)
-#endif
+#endif /* DB_HISTORY_SIZE */
/* returns TRUE at end-of-line */
boolean_t
-db_inputchar(c)
- int c;
+db_inputchar(int c)
{
switch (c) {
case CTRL('b'):
@@ -214,7 +213,7 @@ db_inputchar(c)
INC_DB_CURR();
db_le = db_lc = db_lbuf_start;
} else {
- register char *p;
+ char *p;
INC_DB_CURR();
for (p = db_history_curr, db_le = db_lbuf_start;
*p; ) {
@@ -237,7 +236,7 @@ db_inputchar(c)
INC_DB_CURR();
db_delete_line();
if (db_history_curr != db_history_last) {
- register char *p;
+ char *p;
for (p = db_history_curr,
db_le = db_lbuf_start; *p;) {
*db_le++ = *p++;
@@ -251,7 +250,7 @@ db_inputchar(c)
db_putstring(db_lbuf_start, db_le - db_lbuf_start);
}
break;
-#endif
+#endif /* DB_HISTORY_SIZE */
case CTRL('r'):
db_putstring("^R\n", 3);
if (db_le > db_lbuf_start) {
@@ -268,7 +267,7 @@ db_inputchar(c)
* save it.
*/
if (db_history_curr == db_history_prev) {
- register char *pp, *pc;
+ char *pp, *pc;
/*
* Is it the same?
@@ -292,7 +291,7 @@ db_inputchar(c)
}
}
if (db_le != db_lbuf_start) {
- register char *p;
+ char *p;
db_history_prev = db_history_last;
for (p = db_lbuf_start; p != db_le; p++) {
*db_history_last++ = *p;
@@ -304,7 +303,7 @@ db_inputchar(c)
*db_history_last++ = '\0';
}
db_history_curr = db_history_last;
-#endif
+#endif /* DB_HISTORY_SIZE */
*db_le++ = c;
return (TRUE);
default:
@@ -312,7 +311,7 @@ db_inputchar(c)
cnputc('\007');
}
else if (c >= ' ' && c <= '~') {
- register char *p;
+ char *p;
for (p = db_le; p > db_lc; p--)
*p = *(p-1);
@@ -328,9 +327,9 @@ db_inputchar(c)
}
int
-db_readline(lstart, lsize)
- char * lstart;
- int lsize;
+db_readline(
+ char * lstart,
+ int lsize)
{
db_force_whitespace(); /* synch output position */
@@ -349,9 +348,9 @@ db_readline(lstart, lsize)
}
void
-db_check_interrupt()
+db_check_interrupt(void)
{
- register int c;
+ int c;
c = cnmaygetc();
switch (c) {
diff --git a/ddb/db_input.h b/ddb/db_input.h
index 316e3268..77f07bb6 100644
--- a/ddb/db_input.h
+++ b/ddb/db_input.h
@@ -25,4 +25,6 @@
extern int db_readline (char *lstart, int lsize);
+extern void db_check_interrupt(void);
+
#endif /* _DDB_DB_INPUT_H_ */
diff --git a/ddb/db_lex.c b/ddb/db_lex.c
index ebffe062..8ab69106 100644
--- a/ddb/db_lex.c
+++ b/ddb/db_lex.c
@@ -50,7 +50,7 @@ db_expr_t db_look_token = 0;
int
db_read_line(repeat_last)
- char *repeat_last;
+ const char *repeat_last;
{
int i;
@@ -82,9 +82,9 @@ db_flush_line(void)
}
void
-db_switch_input(buffer, size)
- char *buffer;
- int size;
+db_switch_input(
+ char *buffer,
+ int size)
{
db_lp = buffer;
db_last_lp = db_lp;
@@ -94,8 +94,7 @@ db_switch_input(buffer, size)
}
void
-db_save_lex_context(lp)
- register struct db_lex_context *lp;
+db_save_lex_context(struct db_lex_context *lp)
{
lp->l_ptr = db_lp;
lp->l_eptr = db_endlp;
@@ -105,7 +104,7 @@ db_save_lex_context(lp)
void
db_restore_lex_context(lp)
- register struct db_lex_context *lp;
+ const struct db_lex_context *lp;
{
db_lp = lp->l_ptr;
db_last_lp = db_lp;
@@ -131,15 +130,13 @@ db_read_char(void)
}
void
-db_unread_char(c)
- int c;
+db_unread_char(int c)
{
db_look_char = c;
}
void
-db_unread_token(t)
- int t;
+db_unread_token(int t)
{
db_look_token = t;
}
@@ -179,10 +176,10 @@ db_flush_lex(void)
void
db_skip_to_eol(void)
{
- register int skip;
- register int t;
- register int n;
- register char *p;
+ int skip;
+ int t;
+ int n;
+ char *p;
t = db_read_token();
p = db_last_lp;
@@ -205,8 +202,8 @@ db_skip_to_eol(void)
int
db_lex(void)
{
- register char *cp;
- register int c;
+ char *cp;
+ int c;
c = db_read_char();
while (c <= ' ' || c > '~') {
diff --git a/ddb/db_lex.h b/ddb/db_lex.h
index dc9da0a9..f7677df8 100644
--- a/ddb/db_lex.h
+++ b/ddb/db_lex.h
@@ -31,6 +31,9 @@
* Lexical analyzer.
*/
+#ifndef _DDB_DB_LEX_H_
+#define _DDB_DB_LEX_H_
+
#define TOK_STRING_SIZE 64
#define DB_LEX_LINE_SIZE 256
@@ -42,7 +45,7 @@ struct db_lex_context {
};
extern int db_lex(void);
-extern int db_read_line(char *rep_str);
+extern int db_read_line(const char *rep_str);
extern void db_flush_line(void);
extern int db_read_char(void);
extern void db_unread_char(int c);
@@ -51,7 +54,7 @@ extern void db_unread_token(int t);
extern void db_flush_lex(void);
extern void db_switch_input(char *, int);
extern void db_save_lex_context(struct db_lex_context *);
-extern void db_restore_lex_context(struct db_lex_context *);
+extern void db_restore_lex_context(const struct db_lex_context *);
extern void db_skip_to_eol(void);
extern db_expr_t db_tok_number;
@@ -92,3 +95,5 @@ extern db_expr_t db_radix;
#define tLOG_OR 31
#define tSTRING 32
#define tQUESTION 33
+
+#endif /* _DDB_DB_LEX_H_ */
diff --git a/ddb/db_macro.c b/ddb/db_macro.c
index 43bb5837..307b7c59 100644
--- a/ddb/db_macro.c
+++ b/ddb/db_macro.c
@@ -59,9 +59,9 @@ db_expr_t db_macro_args[DB_MACRO_LEVEL][DB_NARGS];
static struct db_user_macro *
db_lookup_macro(name)
- char *name;
+ const char *name;
{
- register struct db_user_macro *mp;
+ struct db_user_macro *mp;
for (mp = db_user_macro; mp < &db_user_macro[DB_NUSER_MACRO]; mp++) {
if (mp->m_name[0] == 0)
@@ -73,11 +73,11 @@ db_lookup_macro(name)
}
void
-db_def_macro_cmd()
+db_def_macro_cmd(void)
{
- register char *p;
- register int c;
- register struct db_user_macro *mp, *ep;
+ char *p;
+ int c;
+ struct db_user_macro *mp, *ep;
if (db_read_token() != tIDENT) {
db_printf("Bad macro name \"%s\"\n", db_tok_string);
@@ -104,9 +104,9 @@ db_def_macro_cmd()
}
void
-db_del_macro_cmd()
+db_del_macro_cmd(void)
{
- register struct db_user_macro *mp;
+ struct db_user_macro *mp;
if (db_read_token() != tIDENT
|| (mp = db_lookup_macro(db_tok_string)) == 0) {
@@ -120,9 +120,9 @@ db_del_macro_cmd()
}
void
-db_show_macro()
+db_show_macro(void)
{
- register struct db_user_macro *mp;
+ struct db_user_macro *mp;
int t;
char *name = 0;
@@ -141,10 +141,10 @@ db_show_macro()
int
db_exec_macro(name)
- char *name;
+ const char *name;
{
- register struct db_user_macro *mp;
- register int n;
+ struct db_user_macro *mp;
+ int n;
if ((mp = db_lookup_macro(name)) == 0)
return(-1);
@@ -165,13 +165,13 @@ db_exec_macro(name)
return(0);
}
-long
+void
/* ARGSUSED */
-db_arg_variable(vp, valuep, flag, ap)
- struct db_variable *vp;
- db_expr_t *valuep;
- int flag;
- db_var_aux_param_t ap;
+db_arg_variable(
+ struct db_variable *vp,
+ db_expr_t *valuep,
+ int flag,
+ db_var_aux_param_t ap)
{
if (ap->level != 1 || ap->suffix[0] < 1 || ap->suffix[0] > DB_NARGS) {
db_error("Bad $arg variable\n");
@@ -181,7 +181,7 @@ db_arg_variable(vp, valuep, flag, ap)
*valuep = db_macro_args[db_macro_level][ap->suffix[0]-1];
else
db_macro_args[db_macro_level][ap->suffix[0]-1] = *valuep;
- return(0);
+ return;
}
#endif /* MACH_KDB */
diff --git a/ddb/db_macro.h b/ddb/db_macro.h
index da5626f9..2c0a599b 100644
--- a/ddb/db_macro.h
+++ b/ddb/db_macro.h
@@ -30,9 +30,9 @@ extern void db_del_macro_cmd (void);
extern void db_show_macro (void);
-extern int db_exec_macro (char *name);
+extern int db_exec_macro (const char *name);
-extern long db_arg_variable (
+extern void db_arg_variable (
struct db_variable *vp,
db_expr_t *valuep,
int flag,
diff --git a/ddb/db_mp.c b/ddb/db_mp.c
index cc14aea2..8d1a5605 100644
--- a/ddb/db_mp.c
+++ b/ddb/db_mp.c
@@ -38,6 +38,7 @@
#include <ddb/db_command.h>
#include <ddb/db_run.h>
+#include <ddb/db_mp.h>
/*
* Routines to interlock access to the kernel debugger on
@@ -52,12 +53,7 @@ int db_active[NCPUS] = { 0 }; /* count recursive entries
int db_slave[NCPUS] = { 0 }; /* nonzero if cpu interrupted
by another cpu in debugger */
-int db_enter_debug = 0;
-
-void remote_db(); /* forward */
-void lock_db();
-void unlock_db();
-
+boolean_t db_enter_debug = FALSE;
/*
* Called when entering kernel debugger.
@@ -67,7 +63,7 @@ void unlock_db();
*/
boolean_t
-db_enter()
+db_enter(void)
{
int mycpu = cpu_number();
@@ -112,7 +108,7 @@ db_enter()
* Leave debugger.
*/
void
-db_leave()
+db_leave(void)
{
int mycpu = cpu_number();
@@ -147,9 +143,9 @@ db_leave()
*/
void
-remote_db() {
+remote_db(void) {
int my_cpu = cpu_number();
- register int i;
+ int i;
for (i = 0; i < NCPUS; i++) {
if (i != my_cpu &&
@@ -214,8 +210,7 @@ remote_db() {
* switch to another cpu
*/
void
-db_on(cpu)
- int cpu;
+db_on(int cpu)
{
/*
* Save ddb global variables
@@ -254,7 +249,7 @@ db_on(cpu)
* in kernel debugger and wants to stop other CPUs
*/
void
-remote_db_enter()
+remote_db_enter(void)
{
db_slave[cpu_number()]++;
kdb_kintr();
@@ -271,7 +266,7 @@ remote_db_enter()
* is active on another cpu.
*/
void
-lock_db()
+lock_db(void)
{
int my_cpu = cpu_number();
@@ -280,7 +275,7 @@ lock_db()
if (my_cpu == master_cpu) {
db_console();
}
-#endif
+#endif /* CONSOLE_ON_MASTER */
if (db_cpu != -1 && db_cpu != my_cpu)
continue;
@@ -292,9 +287,9 @@ lock_db()
else {
simple_lock(&db_lock);
}
-#else
+#else /* CONSOLE_ON_MASTER */
simple_lock(&db_lock);
-#endif
+#endif /* CONSOLE_ON_MASTER */
if (db_cpu == -1 || db_cpu == my_cpu)
break;
simple_unlock(&db_lock);
@@ -302,14 +297,14 @@ lock_db()
}
void
-unlock_db()
+unlock_db(void)
{
simple_unlock(&db_lock);
}
-#ifdef sketch
+#if CONSOLE_ON_MASTER
void
-db_console()
+db_console(void)
{
if (i_bit(CBUS_PUT_CHAR, my_word)) {
volatile u_char c = cbus_ochar;
@@ -330,7 +325,7 @@ db_console()
db_cpu = my_cpu;
}
}
-#endif /* sketch */
+#endif /* CONSOLE_ON_MASTER */
#endif /* NCPUS > 1 */
diff --git a/ddb/db_mp.h b/ddb/db_mp.h
new file mode 100644
index 00000000..722f28c7
--- /dev/null
+++ b/ddb/db_mp.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _DDB_DB_MP_H_
+#define _DDB_DB_MP_H_
+
+void remote_db(void);
+void lock_db(void);
+void unlock_db(void);
+
+#if CONSOLE_ON_MASTER
+void db_console(void);
+#endif /* CONSOLE_ON_MASTER */
+
+#endif /* _DDB_DB_MP_H_ */
diff --git a/ddb/db_output.c b/ddb/db_output.c
index ec73111a..a34e8070 100644
--- a/ddb/db_output.c
+++ b/ddb/db_output.c
@@ -42,6 +42,7 @@
#include <ddb/db_command.h>
#include <ddb/db_lex.h>
#include <ddb/db_output.h>
+#include <ddb/db_input.h>
/*
* Character output - tracks position in line.
@@ -74,15 +75,13 @@ int db_tab_stop_width = 8; /* how wide are tab stops? */
int db_max_line = DB_MAX_LINE; /* output max lines */
int db_max_width = DB_MAX_WIDTH; /* output line width */
-extern void db_check_interrupt();
-
/*
* Force pending whitespace.
*/
void
db_force_whitespace(void)
{
- register int last_print, next_tab;
+ int last_print, next_tab;
last_print = db_last_non_space;
while (last_print < db_output_position) {
@@ -100,9 +99,9 @@ db_force_whitespace(void)
}
static void
-db_more()
+db_more(void)
{
- register char *p;
+ char *p;
boolean_t quit_output = FALSE;
for (p = "--db_more--"; *p; p++)
@@ -133,8 +132,7 @@ db_more()
* Output character. Buffer whitespace.
*/
void
-db_putchar(c)
- int c; /* character to output */
+db_putchar(int c) /* character to output */
{
if (db_max_line >= DB_MIN_MAX_LINE && db_output_line >= db_max_line-1)
db_more();
@@ -148,7 +146,7 @@ db_putchar(c)
cnputc(c);
db_output_position++;
if (db_max_width >= DB_MIN_MAX_WIDTH
- && db_output_position >= db_max_width-1) {
+ && db_output_position >= db_max_width) {
/* auto new line */
cnputc('\n');
db_output_position = 0;
@@ -189,7 +187,7 @@ db_id_putc(char c, vm_offset_t dummy)
/*
* Return output position
*/
-int
+int __attribute__ ((pure))
db_print_position(void)
{
return (db_output_position);
@@ -210,21 +208,6 @@ db_printf(const char *fmt, ...)
{
va_list listp;
-#ifdef db_printf_enter
- db_printf_enter(); /* optional multiP serialization */
-#endif
- va_start(listp, fmt);
- _doprnt(fmt, listp, db_id_putc, db_radix, 0);
- va_end(listp);
-}
-
-/* alternate name */
-
-/*VARARGS1*/
-void
-kdbprintf(const char *fmt, ...)
-{
- va_list listp;
va_start(listp, fmt);
_doprnt(fmt, listp, db_id_putc, db_radix, 0);
va_end(listp);
diff --git a/ddb/db_output.h b/ddb/db_output.h
index 1159c6ba..497ae430 100644
--- a/ddb/db_output.h
+++ b/ddb/db_output.h
@@ -32,9 +32,15 @@
* Printing routines for kernel debugger.
*/
+#ifndef _DDB_DB_OUTPUT_H_
+#define _DDB_DB_OUTPUT_H_
+
extern void db_force_whitespace(void);
-extern int db_print_position(void);
+extern int db_print_position(void) __attribute__ ((pure));
extern void db_end_line(void);
extern void db_printf(const char *fmt, ...);
+/* alternate name */
+#define kdbprintf db_printf
extern void db_putchar(int c);
-extern void kdbprintf(const char *fmt, ...);
+
+#endif /* _DDB_DB_OUTPUT_H_ */
diff --git a/ddb/db_print.c b/ddb/db_print.c
index 4692e4c4..fb4efaad 100644
--- a/ddb/db_print.c
+++ b/ddb/db_print.c
@@ -53,21 +53,21 @@
#include <ddb/db_task_thread.h>
#include <ddb/db_print.h>
-extern unsigned int db_maxoff;
+extern unsigned long db_maxoff;
/* ARGSUSED */
void
-db_show_regs(addr, have_addr, count, modif)
- db_expr_t addr;
- boolean_t have_addr;
- db_expr_t count;
- char *modif;
+db_show_regs(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ char *modif)
{
- register struct db_variable *regp;
+ struct db_variable *regp;
db_expr_t value;
db_addr_t offset;
char * name;
- register int i;
+ int i;
struct db_var_aux_param aux_param;
task_t task = TASK_NULL;
@@ -127,10 +127,10 @@ db_show_regs(addr, have_addr, count, modif)
char *
db_thread_stat(thread, status)
- register thread_t thread;
- char *status;
+ const thread_t thread;
+ char *status;
{
- register char *p = status;
+ char *p = status;
*p++ = (thread->state & TH_RUN) ? 'R' : '.';
*p++ = (thread->state & TH_WAIT) ? 'W' : '.';
@@ -144,10 +144,10 @@ db_thread_stat(thread, status)
}
void
-db_print_thread(thread, thread_id, flag)
- thread_t thread;
- int thread_id;
- int flag;
+db_print_thread(
+ thread_t thread,
+ int thread_id,
+ int flag)
{
if (flag & OPTION_USER) {
char status[8];
@@ -194,12 +194,8 @@ db_print_thread(thread, thread_id, flag)
2*sizeof(vm_offset_t), thread);
else
db_printf("(%0*X) ", 2*sizeof(vm_offset_t), thread);
- db_printf("%c%c%c%c%c",
- (thread->state & TH_RUN) ? 'R' : ' ',
- (thread->state & TH_WAIT) ? 'W' : ' ',
- (thread->state & TH_SUSP) ? 'S' : ' ',
- (thread->state & TH_UNINT)? 'N' : ' ',
- db_thread_fp_used(thread) ? 'F' : ' ');
+ char status[8];
+ db_printf("%s", db_thread_stat(thread, status));
if (thread->state & TH_SWAPPED) {
if (thread->swap_func) {
db_printf("(");
@@ -220,10 +216,10 @@ db_print_thread(thread, thread_id, flag)
}
void
-db_print_task(task, task_id, flag)
- task_t task;
- int task_id;
- int flag;
+db_print_task(
+ task_t task,
+ int task_id,
+ int flag)
{
thread_t thread;
int thread_id;
@@ -258,7 +254,12 @@ db_print_task(task, task_id, flag)
} else {
if (flag & OPTION_TASK_TITLE)
db_printf(" TASK THREADS\n");
- db_printf("%3d (%0*X): ", task_id, 2*sizeof(vm_offset_t), task);
+ if (task->name[0])
+ db_printf("%3d %s (%0*X): ", task_id, task->name,
+ 2*sizeof(vm_offset_t), task);
+ else
+ db_printf("%3d (%0*X): ", task_id,
+ 2*sizeof(vm_offset_t), task);
if (task->thread_count == 0) {
db_printf("no threads\n");
} else {
@@ -275,13 +276,37 @@ db_print_task(task, task_id, flag)
}
}
+void
+db_show_all_tasks(db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ const char *modif)
+{
+ task_t task;
+ int task_id = 0;
+ processor_set_t pset;
+
+ db_printf(" ID %-*s NAME [THREADS]\n", 2*sizeof(vm_offset_t), "TASK");
+
+ queue_iterate(&all_psets, pset, processor_set_t, all_psets)
+ queue_iterate(&pset->tasks, task, task_t, pset_tasks) {
+ db_printf("%3d %0*X %s [%d]\n",
+ task_id,
+ 2*sizeof(vm_offset_t),
+ task,
+ task->name,
+ task->thread_count);
+ task_id++;
+ }
+}
+
/*ARGSUSED*/
void
db_show_all_threads(addr, have_addr, count, modif)
db_expr_t addr;
boolean_t have_addr;
db_expr_t count;
- char * modif;
+ const char * modif;
{
task_t task;
int task_id;
@@ -332,7 +357,7 @@ db_show_one_thread(addr, have_addr, count, modif)
db_expr_t addr;
boolean_t have_addr;
db_expr_t count;
- char * modif;
+ const char * modif;
{
int flag;
int thread_id;
@@ -378,7 +403,7 @@ db_show_one_task(addr, have_addr, count, modif)
db_expr_t addr;
boolean_t have_addr;
db_expr_t count;
- char * modif;
+ const char * modif;
{
int flag;
int task_id;
@@ -410,41 +435,33 @@ db_show_one_task(addr, have_addr, count, modif)
int
db_port_iterate(thread, func)
- thread_t thread;
+ const thread_t thread;
void (*func)();
{
ipc_entry_t entry;
- int index;
int n = 0;
- int size;
- ipc_space_t space;
-
- space = thread->task->itk_space;
- entry = space->is_table;
- size = space->is_table_size;
- for (index = 0; index < size; index++, entry++) {
+ struct rdxtree_iter iter;
+ rdxtree_for_each(&thread->task->itk_space->is_map, &iter, entry) {
if (entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS)
- (*func)(index, (ipc_port_t) entry->ie_object,
+ (*func)(entry->ie_name, (ipc_port_t) entry->ie_object,
entry->ie_bits, n++);
}
return(n);
}
ipc_port_t
-db_lookup_port(thread, id)
- thread_t thread;
- int id;
+db_lookup_port(
+ thread_t thread,
+ int id)
{
- register ipc_space_t space;
- register ipc_entry_t entry;
+ ipc_entry_t entry;
if (thread == THREAD_NULL)
return(0);
- space = thread->task->itk_space;
- if (id < 0 || id >= space->is_table_size)
+ if (id < 0)
return(0);
- entry = &space->is_table[id];
- if (entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS)
+ entry = ipc_entry_lookup(thread->task->itk_space, (mach_port_t) id);
+ if (entry && entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS)
return((ipc_port_t)entry->ie_object);
return(0);
}
@@ -452,7 +469,7 @@ db_lookup_port(thread, id)
static void
db_print_port_id(id, port, bits, n)
int id;
- ipc_port_t port;
+ const ipc_port_t port;
unsigned bits;
int n;
{
@@ -466,7 +483,7 @@ db_print_port_id(id, port, bits, n)
static void
db_print_port_id_long(
int id,
- ipc_port_t port,
+ const ipc_port_t port,
unsigned bits,
int n)
{
@@ -484,7 +501,7 @@ db_show_port_id(addr, have_addr, count, modif)
db_expr_t addr;
boolean_t have_addr;
db_expr_t count;
- char * modif;
+ const char * modif;
{
thread_t thread;
diff --git a/ddb/db_print.h b/ddb/db_print.h
index 898014e8..87db97be 100644
--- a/ddb/db_print.h
+++ b/ddb/db_print.h
@@ -24,25 +24,31 @@ void db_show_one_task(
db_expr_t addr,
boolean_t have_addr,
db_expr_t count,
- char * modif);
+ const char * modif);
void db_show_port_id(
db_expr_t addr,
boolean_t have_addr,
db_expr_t count,
- char * modif);
+ const char * modif);
void db_show_one_thread(
db_expr_t addr,
int have_addr,
db_expr_t count,
- char * modif);
+ const char * modif);
+
+void db_show_all_tasks(
+ db_expr_t addr,
+ int have_addr,
+ db_expr_t count,
+ const char * modif);
void db_show_all_threads(
db_expr_t addr,
int have_addr,
db_expr_t count,
- char * modif);
+ const char * modif);
db_addr_t db_task_from_space(
ipc_space_t space,
diff --git a/ddb/db_run.c b/ddb/db_run.c
index 53a02ce1..9b467fc4 100644
--- a/ddb/db_run.c
+++ b/ddb/db_run.c
@@ -59,24 +59,13 @@ int db_last_inst_count;
int db_load_count;
int db_store_count;
-#ifndef db_set_single_step
-void db_set_task_single_step(/* db_regs_t *, task_t */);/* forward */
-#else
-#define db_set_task_single_step(regs,task) db_set_single_step(regs)
-#endif
-#ifndef db_clear_single_step
-void db_clear_task_single_step(/* db_regs_t *, task_t */);
-#else
-#define db_clear_task_single_step(regs,task) db_clear_single_step(regs)
-#endif
-
boolean_t
-db_stop_at_pc(is_breakpoint, task)
- boolean_t *is_breakpoint;
- task_t task;
+db_stop_at_pc(
+ boolean_t *is_breakpoint,
+ task_t task)
{
- register db_addr_t pc;
- register db_thread_breakpoint_t bkpt;
+ db_addr_t pc;
+ db_thread_breakpoint_t bkpt;
db_clear_task_single_step(DDB_REGS, task);
db_clear_breakpoints();
@@ -92,7 +81,7 @@ db_stop_at_pc(is_breakpoint, task)
FIXUP_PC_AFTER_BREAK
pc = PC_REGS(DDB_REGS);
}
-#endif
+#endif /* FIXUP_PC_AFTER_BREAK */
/*
* Now check for a breakpoint at this address.
@@ -131,7 +120,7 @@ db_stop_at_pc(is_breakpoint, task)
(!inst_return(ins) || --db_call_depth != 0)) {
if (db_sstep_print) {
if (inst_call(ins) || inst_return(ins)) {
- register int i;
+ int i;
db_printf("[after %6d /%4d] ",
db_inst_count,
@@ -167,32 +156,32 @@ db_stop_at_pc(is_breakpoint, task)
}
void
-db_restart_at_pc(watchpt, task)
- boolean_t watchpt;
- task_t task;
+db_restart_at_pc(
+ boolean_t watchpt,
+ task_t task)
{
- register db_addr_t pc = PC_REGS(DDB_REGS), brpc;
+ db_addr_t pc = PC_REGS(DDB_REGS);
if ((db_run_mode == STEP_COUNT) ||
(db_run_mode == STEP_RETURN) ||
(db_run_mode == STEP_CALLT)) {
- db_expr_t ins;
/*
* We are about to execute this instruction,
* so count it now.
*/
- ins = db_get_task_value(pc, sizeof(int), FALSE, task);
+ db_get_task_value(pc, sizeof(int), FALSE, task);
db_inst_count++;
db_load_count += inst_load(ins);
db_store_count += inst_store(ins);
#ifdef SOFTWARE_SSTEP
+ db_addr_t brpc;
/* Account for instructions in delay slots */
- brpc = next_instr_address(pc,1,task);
+ brpc = next_instr_address(pc, 1, task);
if ((brpc != pc) && (inst_branch(ins) || inst_call(ins))) {
/* Note: this ~assumes an instruction <= sizeof(int) */
- ins = db_get_task_value(brpc, sizeof(int), FALSE, task);
+ db_get_task_value(brpc, sizeof(int), FALSE, task);
db_inst_count++;
db_load_count += inst_load(ins);
db_store_count += inst_store(ins);
@@ -217,9 +206,9 @@ db_restart_at_pc(watchpt, task)
}
void
-db_single_step(regs, task)
- db_regs_t *regs;
- task_t task;
+db_single_step(
+ db_regs_t *regs,
+ task_t task)
{
if (db_run_mode == STEP_CONTINUE) {
db_run_mode = STEP_INVISIBLE;
@@ -260,10 +249,10 @@ db_single_step(regs, task)
db_breakpoint_t db_not_taken_bkpt = 0;
db_breakpoint_t db_taken_bkpt = 0;
-db_breakpoint_t
+db_breakpoint_t __attribute__ ((pure))
db_find_temp_breakpoint(task, addr)
- task_t task;
- db_addr_t addr;
+ const task_t task;
+ db_addr_t addr;
{
if (db_taken_bkpt && (db_taken_bkpt->address == addr) &&
db_taken_bkpt->task == task)
@@ -275,13 +264,13 @@ db_find_temp_breakpoint(task, addr)
}
void
-db_set_task_single_step(regs, task)
- register db_regs_t *regs;
- task_t task;
+db_set_task_single_step(
+ db_regs_t *regs,
+ task_t task)
{
db_addr_t pc = PC_REGS(regs), brpc;
- register unsigned int inst;
- register boolean_t unconditional;
+ unsigned int inst;
+ boolean_t unconditional;
/*
* User was stopped at pc, e.g. the instruction
@@ -321,8 +310,8 @@ db_set_task_single_step(regs, task)
void
db_clear_task_single_step(regs, task)
- db_regs_t *regs;
- task_t task;
+ const db_regs_t *regs;
+ task_t task;
{
if (db_taken_bkpt != 0) {
db_delete_temp_breakpoint(task, db_taken_bkpt);
@@ -346,7 +335,7 @@ db_single_step_cmd(addr, have_addr, count, modif)
db_expr_t addr;
int have_addr;
db_expr_t count;
- char * modif;
+ const char * modif;
{
boolean_t print = FALSE;
@@ -374,7 +363,7 @@ db_trace_until_call_cmd(addr, have_addr, count, modif)
db_expr_t addr;
int have_addr;
db_expr_t count;
- char * modif;
+ const char * modif;
{
boolean_t print = FALSE;
@@ -397,7 +386,7 @@ db_trace_until_matching_cmd(addr, have_addr, count, modif)
db_expr_t addr;
int have_addr;
db_expr_t count;
- char * modif;
+ const char * modif;
{
boolean_t print = FALSE;
@@ -422,7 +411,7 @@ db_continue_cmd(addr, have_addr, count, modif)
db_expr_t addr;
int have_addr;
db_expr_t count;
- char * modif;
+ const char * modif;
{
if (modif[0] == 'c')
db_run_mode = STEP_COUNT;
@@ -437,7 +426,7 @@ db_continue_cmd(addr, have_addr, count, modif)
}
boolean_t
-db_in_single_step()
+db_in_single_step(void)
{
return(db_run_mode != STEP_NONE && db_run_mode != STEP_CONTINUE);
}
diff --git a/ddb/db_run.h b/ddb/db_run.h
index e138f604..c042d4ca 100644
--- a/ddb/db_run.h
+++ b/ddb/db_run.h
@@ -24,6 +24,9 @@
* the rights to redistribute these changes.
*/
+#ifndef _DDB_DB_RUN_H_
+#define _DDB_DB_RUN_H_
+
#include <kern/task.h>
#include <machine/db_machdep.h>
@@ -43,26 +46,49 @@ extern void db_single_step(db_regs_t *regs, task_t task);
extern void db_single_step_cmd(
db_expr_t addr,
- int have_addr,
+ int have_addr,
db_expr_t count,
- char *modif);
+ const char *modif);
void db_trace_until_call_cmd(
db_expr_t addr,
int have_addr,
db_expr_t count,
- char * modif);
+ const char * modif);
void db_trace_until_matching_cmd(
db_expr_t addr,
int have_addr,
db_expr_t count,
- char * modif);
+ const char * modif);
void db_continue_cmd(
db_expr_t addr,
int have_addr,
db_expr_t count,
- char * modif);
+ const char * modif);
+
+#ifndef db_set_single_step
+void db_set_task_single_step(db_regs_t *, task_t);
+#else
+#define db_set_task_single_step(regs, task) db_set_single_step(regs)
+#endif
+#ifndef db_clear_single_step
+void db_clear_task_single_step(const db_regs_t *, task_t);
+#else
+#define db_clear_task_single_step(regs, task) db_clear_single_step(regs)
+#endif
extern boolean_t db_in_single_step(void);
+
+extern void
+db_restart_at_pc(
+ boolean_t watchpt,
+ task_t task);
+
+extern boolean_t
+db_stop_at_pc(
+ boolean_t *is_breakpoint,
+ task_t task);
+
+#endif /* _DDB_DB_RUN_H_ */
diff --git a/ddb/db_sym.c b/ddb/db_sym.c
index 5c5f7006..2abd5746 100644
--- a/ddb/db_sym.c
+++ b/ddb/db_sym.c
@@ -37,6 +37,8 @@
#include <ddb/db_output.h>
#include <ddb/db_sym.h>
#include <ddb/db_task_thread.h>
+#include <ddb/db_aout.h>
+#include <ddb/db_elf.h>
#include <vm/vm_map.h> /* vm_map_t */
@@ -50,21 +52,19 @@ int db_nsymtab = 0;
db_symtab_t *db_last_symtab;
-db_sym_t db_lookup(); /* forward */
-
/*
* Add symbol table, with given name, to list of symbol tables.
*/
boolean_t
-db_add_symbol_table(type, start, end, name, ref, map_pointer)
- int type;
- char *start;
- char *end;
- char *name;
- char *ref;
- char *map_pointer;
+db_add_symbol_table(
+ int type,
+ char *start,
+ char *end,
+ char *name,
+ char *ref,
+ char *map_pointer)
{
- register db_symtab_t *st;
+ db_symtab_t *st;
extern vm_map_t kernel_map;
if (db_nsymtab >= MAXNOSYMTABS)
@@ -76,7 +76,8 @@ db_add_symbol_table(type, start, end, name, ref, map_pointer)
st->end = end;
st->private = ref;
st->map_pointer = (map_pointer == (char *)kernel_map)? 0: map_pointer;
- strcpy(st->name, name);
+ strncpy(st->name, name, sizeof st->name - 1);
+ st->name[sizeof st->name - 1] = '\0';
db_nsymtab++;
@@ -89,13 +90,13 @@ db_add_symbol_table(type, start, end, name, ref, map_pointer)
* Note: return value points to static data whose content is
* overwritten by each call... but in practice this seems okay.
*/
-static char *
+static char * __attribute__ ((pure))
db_qualify(symname, symtabname)
- char *symname;
- register char *symtabname;
+ const char *symname;
+ const char *symtabname;
{
static char tmp[256];
- register char *s;
+ char *s;
s = tmp;
while ((*s++ = *symtabname++)) {
@@ -109,7 +110,7 @@ db_qualify(symname, symtabname)
boolean_t
-db_eqname( char* src, char* dst, char c )
+db_eqname( const char* src, const char* dst, char c )
{
if (!strcmp(src, dst))
return (TRUE);
@@ -119,9 +120,9 @@ db_eqname( char* src, char* dst, char c )
}
boolean_t
-db_value_of_name(name, valuep)
- char *name;
- db_expr_t *valuep;
+db_value_of_name(
+ char *name,
+ db_expr_t *valuep)
{
db_sym_t sym;
@@ -141,14 +142,13 @@ db_value_of_name(name, valuep)
* otherwise, all symbol tables will be searched.
*/
db_sym_t
-db_lookup(symstr)
- char *symstr;
+db_lookup(char *symstr)
{
db_sym_t sp;
- register int i;
+ int i;
int symtab_start = 0;
int symtab_end = db_nsymtab;
- register char *cp;
+ char *cp;
/*
* Look for, remove, and remember any symbol table specifier.
@@ -193,13 +193,13 @@ db_lookup(symstr)
* with parsed file name, symbol name and line number.
*/
db_sym_t
-db_sym_parse_and_lookup(func, symtab, symstr)
- db_sym_t (*func)();
- db_symtab_t *symtab;
- char *symstr;
+db_sym_parse_and_lookup(
+ db_sym_t (*func)(),
+ db_symtab_t *symtab,
+ char *symstr)
{
- register char *p;
- register int n;
+ char *p;
+ int n;
int n_name;
int line_number;
char *file_name = 0;
@@ -265,19 +265,17 @@ out:
boolean_t db_qualify_ambiguous_names = FALSE;
boolean_t
-db_name_is_ambiguous(sym_name)
- char *sym_name;
+db_name_is_ambiguous(char *sym_name)
{
- register int i;
- register
+ int i;
boolean_t found_once = FALSE;
if (!db_qualify_ambiguous_names)
return FALSE;
for (i = 0; i < db_nsymtab; i++) {
- db_sym_t sp;
- if (sp = X_db_lookup(&db_symtabs[i], sym_name)) {
+ db_sym_t sp = X_db_lookup(&db_symtabs[i], sym_name);
+ if (sp) {
if (found_once)
{
db_free_symbol(sp);
@@ -290,26 +288,23 @@ db_name_is_ambiguous(sym_name)
return FALSE;
}
-
-db_sym_t db_search_in_task_symbol();
-
/*
* Find the closest symbol to val, and return its name
* and the difference between val and the symbol found.
*
* Logic change. If the task argument is non NULL and a
- * matching symbol is found in a symbol table which explictly
+ * matching symbol is found in a symbol table which explicitly
* specifies its map to be task->map, that symbol will have
* precedence over any symbol from a symbol table will a null
* map. This allows overlapping kernel/user maps to work correctly.
*
*/
db_sym_t
-db_search_task_symbol(val, strategy, offp, task)
- register db_addr_t val;
- db_strategy_t strategy;
- db_addr_t *offp; /* better be unsigned */
- task_t task;
+db_search_task_symbol(
+ db_addr_t val,
+ db_strategy_t strategy,
+ db_addr_t *offp, /* better be unsigned */
+ task_t task)
{
db_sym_t ret;
@@ -334,15 +329,15 @@ db_search_task_symbol(val, strategy, offp, task)
}
db_sym_t
-db_search_in_task_symbol(val, strategy, offp, task)
- register db_addr_t val;
- db_strategy_t strategy;
- db_addr_t *offp;
- task_t task;
+db_search_in_task_symbol(
+ db_addr_t val,
+ db_strategy_t strategy,
+ db_addr_t *offp,
+ task_t task)
{
- register vm_size_t diff;
+ vm_size_t diff;
vm_size_t newdiff;
- register int i;
+ int i;
db_symtab_t *sp;
db_sym_t ret = DB_SYM_NULL, sym;
vm_map_t map_for_val;
@@ -402,11 +397,11 @@ db_search_in_task_symbol(val, strategy, offp, task)
* Return name and value of a symbol
*/
void
-db_symbol_values(stab, sym, namep, valuep)
- db_symtab_t *stab;
- db_sym_t sym;
- char **namep;
- db_expr_t *valuep;
+db_symbol_values(
+ db_symtab_t *stab,
+ db_sym_t sym,
+ char **namep,
+ db_expr_t *valuep)
{
db_expr_t value;
char *name;
@@ -449,7 +444,7 @@ unsigned long db_maxoff = 0x4000;
void
db_task_printsym(off, strategy, task)
- db_expr_t off;
+ db_addr_t off;
db_strategy_t strategy;
task_t task;
{
@@ -494,7 +489,7 @@ db_line_at_pc( sym, filename, linenum, pc)
db_sym_t sym;
char **filename;
int *linenum;
- db_expr_t pc;
+ db_addr_t pc;
{
return (db_last_symtab) ?
X_db_line_at_pc( db_last_symtab, sym, filename, linenum, pc) :
@@ -512,15 +507,11 @@ void db_free_symbol(db_sym_t s)
* Switch into symbol-table specific routines
*/
-extern boolean_t aout_db_sym_init(), aout_db_line_at_pc();
-extern db_sym_t aout_db_lookup(), aout_db_search_symbol();
-extern void aout_db_symbol_values();
-
-extern boolean_t coff_db_sym_init(), coff_db_line_at_pc();
-extern db_sym_t coff_db_lookup(), coff_db_search_symbol();
-extern void coff_db_symbol_values();
+void dummy_db_free_symbol(db_sym_t symbol) { }
+boolean_t dummy_db_sym_init(char *a, char *b, char *c, char *d) {
+ return FALSE;
+}
-void dummy_db_free_symbol(sym_t) { }
struct db_sym_switch x_db[] = {
@@ -532,15 +523,17 @@ struct db_sym_switch x_db[] = {
aout_db_line_at_pc, aout_db_symbol_values, dummy_db_free_symbol },
#endif /* DB_NO_AOUT */
-#ifdef DB_NO_COFF
{ 0,},
-#else /* DB_NO_COFF */
- { coff_db_sym_init, coff_db_lookup, coff_db_search_symbol,
- coff_db_line_at_pc, coff_db_symbol_values, dummy_db_free_symbol },
-#endif /* DB_NO_COFF */
/* Machdep, not inited here */
- { 0,}
+ { 0,},
+
+#ifdef DB_NO_ELF
+ { 0,},
+#else /* DB_NO_ELF */
+ { dummy_db_sym_init, elf_db_lookup, elf_db_search_symbol,
+ elf_db_line_at_pc, elf_db_symbol_values, dummy_db_free_symbol },
+#endif /* DB_NO_ELF */
};
diff --git a/ddb/db_sym.h b/ddb/db_sym.h
index e40264ab..d8f33874 100644
--- a/ddb/db_sym.h
+++ b/ddb/db_sym.h
@@ -46,6 +46,7 @@ typedef struct {
#define SYMTAB_AOUT 0
#define SYMTAB_COFF 1
#define SYMTAB_MACHDEP 2
+#define SYMTAB_ELF 3
char *start; /* symtab location */
char *end;
char *private; /* optional machdep pointer */
@@ -161,10 +162,10 @@ extern void db_symbol_values( db_symtab_t *stab,
db_search_task_symbol(val,strgy,offp,0)
/* strcmp, modulo leading char */
-extern boolean_t db_eqname( char* src, char* dst, char c );
+extern boolean_t db_eqname( const char* src, const char* dst, char c );
/* print closest symbol to a value */
-extern void db_task_printsym( db_expr_t off,
+extern void db_task_printsym( db_addr_t off,
db_strategy_t strategy,
task_t task);
@@ -205,7 +206,7 @@ extern struct db_sym_switch {
db_sym_t sym,
char **file,
int *line,
- db_expr_t pc
+ db_addr_t pc
);
void (*symbol_values)(
@@ -235,6 +236,35 @@ extern boolean_t db_line_at_pc(
db_sym_t sym,
char **filename,
int *linenum,
- db_expr_t pc);
-
-#endif
+ db_addr_t pc);
+
+extern boolean_t aout_db_sym_init(
+ char *symtab,
+ char *esymtab,
+ char *name,
+ char *task_addr);
+
+extern boolean_t elf_db_sym_init (
+ unsigned shdr_num,
+ vm_size_t shdr_size,
+ vm_offset_t shdr_addr,
+ unsigned shdr_shndx,
+ char *name,
+ char *task_addr);
+
+db_sym_t db_lookup(char *);
+
+db_sym_t
+db_search_in_task_symbol(
+ db_addr_t val,
+ db_strategy_t strategy,
+ db_addr_t *offp,
+ task_t task);
+
+extern db_sym_t
+db_sym_parse_and_lookup(
+ db_sym_t (*func)(),
+ db_symtab_t *symtab,
+ char *symstr);
+
+#endif /* _DDB_DB_SYM_H_ */
diff --git a/ddb/db_task_thread.c b/ddb/db_task_thread.c
index 1146223b..7927e674 100644
--- a/ddb/db_task_thread.c
+++ b/ddb/db_task_thread.c
@@ -52,12 +52,12 @@ thread_t db_default_thread; /* default target thread */
*/
int
db_lookup_task(target_task)
- task_t target_task;
+ const task_t target_task;
{
- register task_t task;
- register int task_id;
- register processor_set_t pset;
- register int npset = 0;
+ task_t task;
+ int task_id;
+ processor_set_t pset;
+ int npset = 0;
task_id = 0;
if (queue_first(&all_psets) == 0)
@@ -82,11 +82,11 @@ db_lookup_task(target_task)
*/
int
db_lookup_task_thread(task, target_thread)
- task_t task;
- thread_t target_thread;
+ const task_t task;
+ const thread_t target_thread;
{
- register thread_t thread;
- register int thread_id;
+ thread_t thread;
+ int thread_id;
thread_id = 0;
if (queue_first(&task->thread_list) == 0)
@@ -106,13 +106,13 @@ db_lookup_task_thread(task, target_thread)
*/
int
db_lookup_thread(target_thread)
- thread_t target_thread;
+ const thread_t target_thread;
{
- register int thread_id;
- register task_t task;
- register processor_set_t pset;
- register int ntask = 0;
- register int npset = 0;
+ int thread_id;
+ task_t task;
+ processor_set_t pset;
+ int ntask = 0;
+ int npset = 0;
if (queue_first(&all_psets) == 0)
return(-1);
@@ -139,7 +139,7 @@ db_lookup_thread(target_thread)
*/
boolean_t
db_check_thread_address_valid(thread)
- thread_t thread;
+ const thread_t thread;
{
if (db_lookup_thread(thread) < 0) {
db_printf("Bad thread address 0x%x\n", thread);
@@ -150,15 +150,14 @@ db_check_thread_address_valid(thread)
}
/*
- * convert task_id(queue postion) to task address
+ * convert task_id(queue position) to task address
*/
task_t
-db_lookup_task_id(task_id)
- register int task_id;
+db_lookup_task_id(int task_id)
{
- register task_t task;
- register processor_set_t pset;
- register int npset = 0;
+ task_t task;
+ processor_set_t pset;
+ int npset = 0;
if (task_id > DB_MAX_TASKID)
return(TASK_NULL);
@@ -181,11 +180,11 @@ db_lookup_task_id(task_id)
* convert (task_id, thread_id) pair to thread address
*/
static thread_t
-db_lookup_thread_id(task, thread_id)
- task_t task;
- register int thread_id;
+db_lookup_thread_id(
+ task_t task,
+ int thread_id)
{
- register thread_t thread;
+ thread_t thread;
if (thread_id > DB_MAX_THREADID)
@@ -204,9 +203,9 @@ db_lookup_thread_id(task, thread_id)
* thread address
*/
boolean_t
-db_get_next_thread(threadp, position)
- thread_t *threadp;
- int position;
+db_get_next_thread(
+ thread_t *threadp,
+ int position)
{
db_expr_t value;
thread_t thread;
@@ -245,17 +244,18 @@ db_init_default_thread(void)
* in the command line
*/
/* ARGSUSED */
-long
-db_set_default_thread(vp, valuep, flag)
- struct db_variable *vp;
- db_expr_t *valuep;
- int flag;
+void
+db_set_default_thread(vp, valuep, flag, ap)
+ struct db_variable *vp;
+ db_expr_t *valuep;
+ int flag;
+ db_var_aux_param_t ap;
{
thread_t thread;
if (flag != DB_VAR_SET) {
*valuep = (db_expr_t) db_default_thread;
- return(0);
+ return;
}
thread = (thread_t) *valuep;
if (thread != THREAD_NULL && !db_check_thread_address_valid(thread))
@@ -264,18 +264,18 @@ db_set_default_thread(vp, valuep, flag)
db_default_thread = thread;
if (thread)
db_default_task = thread->task;
- return(0);
+ return;
}
/*
* convert $taskXXX[.YYY] type DDB variable to task or thread address
*/
-long
-db_get_task_thread(vp, valuep, flag, ap)
- struct db_variable *vp;
- db_expr_t *valuep;
- int flag;
- db_var_aux_param_t ap;
+void
+db_get_task_thread(
+ struct db_variable *vp,
+ db_expr_t *valuep,
+ int flag,
+ db_var_aux_param_t ap)
{
task_t task;
thread_t thread;
@@ -291,7 +291,7 @@ db_get_task_thread(vp, valuep, flag, ap)
}
if (ap->level <= 1) {
*valuep = (db_expr_t) task;
- return(0);
+ return;
}
if ((thread = db_lookup_thread_id(task, ap->suffix[1])) == THREAD_NULL){
db_printf("no such thread($task%d.%d)\n",
@@ -300,7 +300,7 @@ db_get_task_thread(vp, valuep, flag, ap)
/* NOTREACHED */
}
*valuep = (db_expr_t) thread;
- return(0);
+ return;
}
#endif /* MACH_KDB */
diff --git a/ddb/db_task_thread.h b/ddb/db_task_thread.h
index ebf99d8d..cbb36802 100644
--- a/ddb/db_task_thread.h
+++ b/ddb/db_task_thread.h
@@ -27,6 +27,8 @@
#ifndef _DDB_DB_TASK_THREAD_H_
#define _DDB_DB_TASK_THREAD_H_
+#include <ddb/db_variables.h>
+
#include <kern/task.h>
#include <kern/thread.h>
@@ -41,11 +43,25 @@
extern task_t db_default_task; /* default target task */
extern thread_t db_default_thread; /* default target thread */
-extern int db_lookup_task(task_t);
-extern int db_lookup_thread(thread_t);
-extern int db_lookup_task_thread(task_t, thread_t);
-extern boolean_t db_check_thread_address_valid(thread_t);
+extern int db_lookup_task(const task_t);
+extern int db_lookup_thread(const thread_t);
+extern int db_lookup_task_thread(const task_t, const thread_t);
+extern boolean_t db_check_thread_address_valid(const thread_t);
extern boolean_t db_get_next_thread(thread_t *, int);
extern void db_init_default_thread(void);
+extern void
+db_set_default_thread(
+ struct db_variable *vp,
+ db_expr_t *valuep,
+ int flag,
+ db_var_aux_param_t ap);
+
+extern void
+db_get_task_thread(
+ struct db_variable *vp,
+ db_expr_t *valuep,
+ int flag,
+ db_var_aux_param_t ap);
+
#endif /* _DDB_DB_TASK_THREAD_H_ */
diff --git a/ddb/db_trap.c b/ddb/db_trap.c
index 8f59a367..cbb6bde0 100644
--- a/ddb/db_trap.c
+++ b/ddb/db_trap.c
@@ -43,30 +43,31 @@
#include <ddb/db_output.h>
#include <ddb/db_task_thread.h>
#include <ddb/db_trap.h>
+#include <ddb/db_run.h>
+#include <machine/db_interface.h>
+#include <kern/lock.h>
extern jmp_buf_t *db_recover;
-extern void db_restart_at_pc();
-extern boolean_t db_stop_at_pc();
-
extern int db_inst_count;
extern int db_load_count;
extern int db_store_count;
void
-db_task_trap(type, code, user_space)
- int type, code;
- boolean_t user_space;
+db_task_trap(
+ int type,
+ int code,
+ boolean_t user_space)
{
jmp_buf_t db_jmpbuf;
jmp_buf_t *prev;
boolean_t bkpt;
boolean_t watchpt;
- void db_init_default_thread();
- void db_check_breakpoint_valid();
task_t task_space;
+ check_simple_locks_disable();
+
task_space = db_target_space(current_thread(), user_space);
bkpt = IS_BREAKPOINT_TRAP(type, code);
watchpt = IS_WATCHPOINT_TRAP(type, code);
@@ -91,17 +92,22 @@ db_task_trap(type, code, user_space)
db_print_loc_and_inst(db_dot, task_space);
else
db_printf("Trouble printing location %#X.\n", db_dot);
+
+ if (!bkpt && !watchpt && _setjmp(db_recover = &db_jmpbuf) == 0)
+ db_stack_trace_cmd(0, 0, -1, "");
db_recover = prev;
db_command_loop();
}
+ check_simple_locks_enable();
db_restart_at_pc(watchpt, task_space);
}
void
-db_trap(type, code)
- int type, code;
+db_trap(
+ int type,
+ int code)
{
db_task_trap(type, code, !DB_VALID_KERN_ADDR(PC_REGS(DDB_REGS)));
}
diff --git a/ddb/db_variables.c b/ddb/db_variables.c
index 55b87422..4442ccbc 100644
--- a/ddb/db_variables.c
+++ b/ddb/db_variables.c
@@ -39,6 +39,7 @@
#include <ddb/db_output.h>
#include <ddb/db_variables.h>
#include <ddb/db_task_thread.h>
+#include <ddb/db_macro.h>
extern unsigned long db_maxoff;
@@ -46,9 +47,6 @@ extern db_expr_t db_radix;
extern db_expr_t db_max_width;
extern db_expr_t db_tab_stop_width;
extern db_expr_t db_max_line;
-extern int db_set_default_thread();
-extern int db_get_task_thread();
-extern int db_arg_variable();
#define DB_NWORK 32 /* number of work variable */
@@ -70,12 +68,12 @@ struct db_variable db_vars[] = {
};
struct db_variable *db_evars = db_vars + sizeof(db_vars)/sizeof(db_vars[0]);
-char *
+const char *
db_get_suffix(suffix, suffix_value)
- register char *suffix;
+ const char *suffix;
short *suffix_value;
{
- register int value;
+ int value;
for (value = 0; *suffix && *suffix != '.' && *suffix != ':'; suffix++) {
if (*suffix < '0' || *suffix > '9')
@@ -92,10 +90,11 @@ static boolean_t
db_cmp_variable_name(vp, name, ap)
struct db_variable *vp;
char *name;
- register db_var_aux_param_t ap;
+ const db_var_aux_param_t ap;
{
- register char *var_np, *np;
- register int level;
+ char *var_np;
+ const char *np;
+ int level;
for (np = name, var_np = vp->name; *var_np; ) {
if (*np++ != *var_np++)
@@ -116,9 +115,9 @@ db_cmp_variable_name(vp, name, ap)
}
int
-db_find_variable(varp, ap)
- struct db_variable **varp;
- db_var_aux_param_t ap;
+db_find_variable(
+ struct db_variable **varp,
+ db_var_aux_param_t ap)
{
int t;
struct db_variable *vp;
@@ -143,12 +142,8 @@ db_find_variable(varp, ap)
return (0);
}
-
-void db_read_write_variable(); /* forward */
-
int
-db_get_variable(valuep)
- db_expr_t *valuep;
+db_get_variable(db_expr_t *valuep)
{
struct db_variable *vp;
struct db_var_aux_param aux_param;
@@ -164,8 +159,7 @@ db_get_variable(valuep)
}
int
-db_set_variable(value)
- db_expr_t value;
+db_set_variable(db_expr_t value)
{
struct db_variable *vp;
struct db_var_aux_param aux_param;
@@ -181,13 +175,13 @@ db_set_variable(value)
}
void
-db_read_write_variable(vp, valuep, rw_flag, ap)
- struct db_variable *vp;
- db_expr_t *valuep;
- int rw_flag;
- db_var_aux_param_t ap;
+db_read_write_variable(
+ struct db_variable *vp,
+ db_expr_t *valuep,
+ int rw_flag,
+ db_var_aux_param_t ap)
{
- int (*func)() = vp->fcn;
+ void (*func)() = vp->fcn;
struct db_var_aux_param aux_param;
if (ap == 0) {
@@ -206,7 +200,7 @@ db_read_write_variable(vp, valuep, rw_flag, ap)
}
void
-db_set_cmd()
+db_set_cmd(void)
{
db_expr_t value;
int t;
diff --git a/ddb/db_variables.h b/ddb/db_variables.h
index af7068f5..9880d50f 100644
--- a/ddb/db_variables.h
+++ b/ddb/db_variables.h
@@ -32,6 +32,7 @@
#define _DB_VARIABLES_H_
#include <kern/thread.h>
+#include <machine/db_machdep.h>
/*
* Debugger variables.
@@ -42,7 +43,7 @@ struct db_variable {
char *name; /* Name of variable */
db_expr_t *valuep; /* pointer to value of variable */
/* function to call when reading/writing */
- long (*fcn)(struct db_variable *, db_expr_t *, int, db_var_aux_param_t);
+ void (*fcn)(struct db_variable *, db_expr_t *, int, db_var_aux_param_t);
short min_level; /* number of minimum suffix levels */
short max_level; /* number of maximum suffix levels */
short low; /* low value of level 1 suffix */
@@ -50,7 +51,7 @@ struct db_variable {
#define DB_VAR_GET 0
#define DB_VAR_SET 1
};
-#define FCN_NULL ((long (*)())0)
+#define FCN_NULL ((void (*)())0)
#define DB_VAR_LEVEL 3 /* maximum number of suffix level */
@@ -80,6 +81,8 @@ extern struct db_variable *db_eregs;
extern int db_get_variable(db_expr_t *valuep);
-void db_set_cmd();
+void db_set_cmd(void);
+
+void db_read_write_variable(struct db_variable *, db_expr_t *, int, struct db_var_aux_param *);
#endif /* _DB_VARIABLES_H_ */
diff --git a/ddb/db_watch.c b/ddb/db_watch.c
index a2f852bd..f0d0443f 100644
--- a/ddb/db_watch.c
+++ b/ddb/db_watch.c
@@ -65,9 +65,9 @@ db_watchpoint_t db_watchpoint_list = 0;
extern vm_map_t kernel_map;
db_watchpoint_t
-db_watchpoint_alloc()
+db_watchpoint_alloc(void)
{
- register db_watchpoint_t watch;
+ db_watchpoint_t watch;
if ((watch = db_free_watchpoints) != 0) {
db_free_watchpoints = watch->link;
@@ -85,7 +85,7 @@ db_watchpoint_alloc()
void
db_watchpoint_free(watch)
- register db_watchpoint_t watch;
+ db_watchpoint_t watch;
{
watch->link = db_free_watchpoints;
db_free_watchpoints = watch;
@@ -93,11 +93,11 @@ db_watchpoint_free(watch)
void
db_set_watchpoint(task, addr, size)
- task_t task;
+ const task_t task;
db_addr_t addr;
vm_size_t size;
{
- register db_watchpoint_t watch;
+ db_watchpoint_t watch;
/*
* Should we do anything fancy with overlapping regions?
@@ -130,11 +130,11 @@ db_set_watchpoint(task, addr, size)
void
db_delete_watchpoint(task, addr)
- task_t task;
+ const task_t task;
db_addr_t addr;
{
- register db_watchpoint_t watch;
- register db_watchpoint_t *prev;
+ db_watchpoint_t watch;
+ db_watchpoint_t *prev;
for (prev = &db_watchpoint_list; (watch = *prev) != 0;
prev = &watch->link) {
@@ -153,8 +153,8 @@ db_delete_watchpoint(task, addr)
void
db_list_watchpoints(void)
{
- register db_watchpoint_t watch;
- int task_id;
+ db_watchpoint_t watch;
+ int task_id;
if (db_watchpoint_list == 0) {
db_printf("No watchpoints set\n");
@@ -179,7 +179,7 @@ db_list_watchpoints(void)
static int
db_get_task(modif, taskp, addr)
- char *modif;
+ const char *modif;
task_t *taskp;
db_addr_t addr;
{
@@ -221,7 +221,7 @@ db_deletewatch_cmd(addr, have_addr, count, modif)
db_expr_t addr;
int have_addr;
db_expr_t count;
- char * modif;
+ const char * modif;
{
task_t task;
@@ -237,12 +237,11 @@ db_watchpoint_cmd(addr, have_addr, count, modif)
db_expr_t addr;
int have_addr;
db_expr_t count;
- char * modif;
+ const char * modif;
{
vm_size_t size;
db_expr_t value;
task_t task;
- boolean_t db_option();
if (db_get_task(modif, &task, addr) < 0)
return;
@@ -255,7 +254,7 @@ db_watchpoint_cmd(addr, have_addr, count, modif)
/* list watchpoints */
void
-db_listwatch_cmd()
+db_listwatch_cmd(void)
{
db_list_watchpoints();
}
@@ -263,8 +262,8 @@ db_listwatch_cmd()
void
db_set_watchpoints(void)
{
- register db_watchpoint_t watch;
- vm_map_t map;
+ db_watchpoint_t watch;
+ vm_map_t map;
unsigned hw_idx = 0;
if (!db_watchpoints_inserted) {
@@ -295,14 +294,14 @@ db_clear_watchpoints(void)
}
boolean_t
-db_find_watchpoint(map, addr, regs)
- vm_map_t map;
- db_addr_t addr;
- db_regs_t *regs;
+db_find_watchpoint(
+ vm_map_t map,
+ db_addr_t addr,
+ db_regs_t *regs)
{
- register db_watchpoint_t watch;
+ db_watchpoint_t watch;
db_watchpoint_t found = 0;
- register task_t task_space;
+ task_t task_space;
task_space = (map == kernel_map)? TASK_NULL: db_current_task();
for (watch = db_watchpoint_list; watch != 0; watch = watch->link) {
diff --git a/ddb/db_watch.h b/ddb/db_watch.h
index fb95ae53..7ef1a207 100644
--- a/ddb/db_watch.h
+++ b/ddb/db_watch.h
@@ -49,27 +49,27 @@ typedef struct db_watchpoint {
} *db_watchpoint_t;
extern boolean_t db_find_watchpoint(vm_map_t map, db_addr_t addr,
- db_regs_t *regs);
+ db_regs_t *regs);
extern void db_set_watchpoints(void);
extern void db_clear_watchpoints(void);
-extern void db_set_watchpoint(task_t task, db_addr_t addr, vm_size_t size);
-extern void db_delete_watchpoint(task_t task, db_addr_t addr);
+extern void db_set_watchpoint(const task_t task, db_addr_t addr, vm_size_t size);
+extern void db_delete_watchpoint(const task_t task, db_addr_t addr);
extern void db_list_watchpoints(void);
-void db_listwatch_cmd();
+void db_listwatch_cmd(void);
void db_deletewatch_cmd(
db_expr_t addr,
int have_addr,
db_expr_t count,
- char * modif);
+ const char * modif);
void db_watchpoint_cmd(
db_expr_t addr,
int have_addr,
db_expr_t count,
- char * modif);
+ const char * modif);
#endif /* _DDB_DB_WATCH_ */
diff --git a/ddb/db_write_cmd.c b/ddb/db_write_cmd.c
index eacf53b4..46a2ee32 100644
--- a/ddb/db_write_cmd.c
+++ b/ddb/db_write_cmd.c
@@ -55,12 +55,12 @@ db_write_cmd(address, have_addr, count, modif)
db_expr_t address;
boolean_t have_addr;
db_expr_t count;
- char * modif;
+ const char * modif;
{
- register db_addr_t addr;
- register db_expr_t old_value;
+ db_addr_t addr;
+ db_expr_t old_value;
db_expr_t new_value;
- register int size;
+ int size;
boolean_t wrote_one = FALSE;
boolean_t t_opt, u_opt;
thread_t thread;
diff --git a/ddb/db_write_cmd.h b/ddb/db_write_cmd.h
index 74bac54c..3a1d0575 100644
--- a/ddb/db_write_cmd.h
+++ b/ddb/db_write_cmd.h
@@ -1,3 +1,21 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
#ifndef _DDB_DB_WRITE_CMD_H_
#define _DDB_DB_WRITE_CMD_H_
@@ -11,6 +29,6 @@ void db_write_cmd(
db_expr_t address,
boolean_t have_addr,
db_expr_t count,
- char * modif);
+ const char * modif);
#endif /* !_DDB_DB_WRITE_CMD_H_ */
diff --git a/ddb/stab.h b/ddb/stab.h
index 3ebc1af8..55e9d452 100644
--- a/ddb/stab.h
+++ b/ddb/stab.h
@@ -33,6 +33,8 @@
* @(#)stab.h 5.2 (Berkeley) 4/4/91
*/
+#ifndef _DDB_STAB_H_
+#define _DDB_STAB_H_
/*
* The following are symbols used by various debuggers and by the Pascal
@@ -67,3 +69,5 @@
#define N_ECOMM 0xe4 /* end common */
#define N_ECOML 0xe8 /* end common (local name) */
#define N_LENG 0xfe /* length of preceding entry */
+
+#endif /* _DDB_STAB_H_ */
diff --git a/device/blkio.c b/device/blkio.c
index 27fec0e4..e5b4d09f 100644
--- a/device/blkio.c
+++ b/device/blkio.c
@@ -38,12 +38,12 @@
-io_return_t block_io(strat, max_count, ior)
- void (*strat)();
- void (*max_count)();
- register io_req_t ior;
+io_return_t block_io(
+ void (*strat)(),
+ void (*max_count)(),
+ io_req_t ior)
{
- register kern_return_t rc;
+ kern_return_t rc;
boolean_t wait = FALSE;
/*
@@ -88,8 +88,7 @@ io_return_t block_io(strat, max_count, ior)
*/
#define MAX_PHYS (256 * 1024)
-void minphys(ior)
- register io_req_t ior;
+void minphys(io_req_t ior)
{
if ((ior->io_op & (IO_WRITE | IO_READ | IO_OPEN)) == IO_WRITE)
return;
@@ -102,7 +101,7 @@ void minphys(ior)
* Dummy routine placed in device switch entries to indicate that
* block device may be mapped.
*/
-vm_offset_t block_io_mmap()
+int block_io_mmap(dev_t dev, vm_offset_t off, int prot)
{
return (0);
}
diff --git a/device/blkio.h b/device/blkio.h
new file mode 100644
index 00000000..77eb105a
--- /dev/null
+++ b/device/blkio.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _DEVICE_BLKIO_H_
+#define _DEVICE_BLKIO_H_
+
+extern int block_io_mmap(dev_t dev, vm_offset_t off, int prot);
+
+#endif /* _DEVICE_BLKIO_H_ */
diff --git a/device/buf.h b/device/buf.h
index 80466c8d..a79ed8e4 100644
--- a/device/buf.h
+++ b/device/buf.h
@@ -30,6 +30,9 @@
* Definitions to make new IO structures look like old ones
*/
+#ifndef _DEVICE_BUF_H_
+#define _DEVICE_BUF_H_
+
/*
* io_req and fields
*/
@@ -80,11 +83,6 @@
#define B_MD1 IO_SPARE_START
/*
- * Redefine uio structure
- */
-#define uio io_req
-
-/*
* Redefine physio routine
*/
#define physio(strat, xbuf, dev, ops, minphys, ior) \
@@ -100,3 +98,5 @@ extern void minphys(io_req_t);
*/
#define biodone iodone
#define biowait iowait
+
+#endif /* _DEVICE_BUF_H_ */
diff --git a/device/chario.c b/device/chario.c
index c40705e3..0e9dd70b 100644
--- a/device/chario.c
+++ b/device/chario.c
@@ -49,6 +49,7 @@
#include <device/io_req.h>
#include <device/ds_routines.h>
#include <device/device_reply.user.h>
+#include <device/chario.h>
#include <device/tty.h>
@@ -63,16 +64,6 @@ short ttlowat[NSPEEDS] =
125,125 };
/*
- * forward declarations
- */
-void queue_delayed_reply(
- queue_t, io_req_t, boolean_t (*)(io_req_t));
-void tty_output(struct tty *);
-boolean_t char_open_done(io_req_t);
-boolean_t char_read_done(io_req_t);
-boolean_t char_write_done(io_req_t);
-
-/*
* Fake 'line discipline' switch for the benefit of old code
* that wants to call through it.
*/
@@ -89,9 +80,9 @@ struct ldisc_switch linesw[] = {
/*
* Sizes for input and output circular buffers.
*/
-const int tty_inq_size = 4096; /* big nuf */
-const int tty_outq_size = 2048; /* Must be bigger that tthiwat */
-int pdma_default = 1; /* turn pseudo dma on by default */
+const unsigned int tty_inq_size = 4096; /* big nuf */
+const unsigned int tty_outq_size = 2048; /* Must be bigger than tthiwat */
+boolean_t pdma_default = TRUE; /* turn pseudo dma on by default */
/*
* compute pseudo-dma tables
@@ -260,7 +251,7 @@ io_return_t char_write(
spl_t s;
int count;
char *data;
- vm_offset_t addr;
+ vm_offset_t addr = 0;
io_return_t rc = D_SUCCESS;
data = ior->io_data;
@@ -535,9 +526,9 @@ void ttyclose(
*/
boolean_t
tty_queue_clean(
- queue_t q,
- ipc_port_t port,
- boolean_t (*routine)(io_req_t) )
+ queue_t q,
+ const ipc_port_t port,
+ boolean_t (*routine)(io_req_t) )
{
io_req_t ior;
@@ -561,8 +552,8 @@ tty_queue_clean(
*/
boolean_t
tty_portdeath(
- struct tty * tp,
- ipc_port_t port)
+ struct tty * tp,
+ const ipc_port_t port)
{
spl_t spl = spltty();
boolean_t result;
@@ -848,8 +839,7 @@ void ttrstrt(
* Called at spltty, tty already locked.
* Must be on master CPU if device runs on master.
*/
-void ttstart(tp)
- struct tty *tp;
+void ttstart(struct tty *tp)
{
if ((tp->t_state & (TS_TIMEOUT|TS_TTSTOP|TS_BUSY)) == 0) {
/*
@@ -916,7 +906,7 @@ void ttypush(
if (state & TS_MIN_TO_RCV)
{ /* a character was received */
tp->t_state = state & ~TS_MIN_TO_RCV;
- timeout(ttypush,tp,pdma_timeouts[tp->t_ispeed]);
+ timeout(ttypush, tp, pdma_timeouts[tp->t_ispeed]);
}
else
{
@@ -1013,7 +1003,7 @@ void ttyinput_many(
* Do not want to overflow input queue
*/
if (tp->t_inq.c_cc < tp->t_inq.c_hog)
- count -= b_to_q( chars, count, &tp->t_inq);
+ count -= b_to_q(chars, count, &tp->t_inq);
tty_queue_completion(&tp->t_delayed_read);
}
diff --git a/device/chario.h b/device/chario.h
new file mode 100644
index 00000000..52105a20
--- /dev/null
+++ b/device/chario.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _DEVICE_CHARIO_H_
+#define _DEVICE_CHARIO_H_
+
+#include <device/tty.h>
+
+extern void chario_init(void);
+
+void queue_delayed_reply(
+ queue_t qh,
+ io_req_t ior,
+ boolean_t (*io_done)(io_req_t));
+
+void tty_output(struct tty *tp);
+
+boolean_t char_open_done(io_req_t);
+boolean_t char_read_done(io_req_t);
+boolean_t char_write_done(io_req_t);
+
+#endif /* _DEVICE_CHARIO_H_ */
diff --git a/device/cirbuf.c b/device/cirbuf.c
index 7af2d7ad..391297ce 100644
--- a/device/cirbuf.c
+++ b/device/cirbuf.c
@@ -42,7 +42,9 @@
/* if c_cl == c_cf - 1, buffer is full */
#if DEBUG
-int cb_check_enable = 0;
+#include <mach/boolean.h>
+
+boolean_t cb_check_enable = FALSE;
#define CB_CHECK(cb) if (cb_check_enable) cb_check(cb)
void
@@ -270,7 +272,7 @@ void cb_clear(struct cirbuf *cb)
void
cb_alloc(
struct cirbuf *cb,
- int buf_size)
+ vm_size_t buf_size)
{
char *buf;
@@ -292,7 +294,7 @@ cb_alloc(
void
cb_free(struct cirbuf *cb)
{
- int size;
+ vm_size_t size;
size = cb->c_end - cb->c_start;
kfree((vm_offset_t)cb->c_start, size);
diff --git a/device/cirbuf.h b/device/cirbuf.h
index a3f50ce5..64771ce1 100644
--- a/device/cirbuf.h
+++ b/device/cirbuf.h
@@ -52,11 +52,10 @@ extern int putc(int, struct cirbuf *);
extern int getc(struct cirbuf *);
extern int q_to_b(struct cirbuf *, char *, int);
extern int b_to_q(char *, int, struct cirbuf *);
-extern int nqdb(struct cirbuf *, int);
extern void ndflush(struct cirbuf *, int);
extern void cb_clear(struct cirbuf *);
-extern void cb_alloc(struct cirbuf *, int);
+extern void cb_alloc(struct cirbuf *, vm_size_t);
extern void cb_free(struct cirbuf *);
#endif /* _DEVICE_CIRBUF_H_ */
diff --git a/device/conf.h b/device/conf.h
index e91e0996..fea18223 100644
--- a/device/conf.h
+++ b/device/conf.h
@@ -32,22 +32,30 @@
#define _DEVICE_CONF_H_
#include <mach/machine/vm_types.h>
+#include <sys/types.h>
+#include <mach/port.h>
+#include <mach/vm_prot.h>
+
+struct io_req;
+typedef struct io_req *io_req_t;
+
+typedef int io_return_t;
/*
* Operations list for major device types.
*/
struct dev_ops {
- char * d_name; /* name for major device */
- int (*d_open)(); /* open device */
- int (*d_close)(); /* close device */
- int (*d_read)(); /* read */
- int (*d_write)(); /* write */
- int (*d_getstat)(); /* get status/control */
- int (*d_setstat)(); /* set status/control */
- vm_offset_t (*d_mmap)(); /* map memory */
- int (*d_async_in)();/* asynchronous input setup */
- int (*d_reset)(); /* reset device */
- int (*d_port_death)();
+ char * d_name; /* name for major device */
+ int (*d_open)(dev_t, int, io_req_t);/* open device */
+ void (*d_close)(dev_t, int); /* close device */
+ int (*d_read)(dev_t, io_req_t); /* read */
+ int (*d_write)(dev_t, io_req_t); /* write */
+ int (*d_getstat)(dev_t, int, int *, natural_t *); /* get status/control */
+ int (*d_setstat)(dev_t, int, int *, natural_t); /* set status/control */
+ int (*d_mmap)(dev_t, vm_offset_t, vm_prot_t); /* map memory */
+ int (*d_async_in)(); /* asynchronous input setup */
+ int (*d_reset)(); /* reset device */
+ int (*d_port_death)(dev_t, mach_port_t);
/* clean up reply ports */
int d_subdev; /* number of sub-devices per
unit */
@@ -58,9 +66,16 @@ typedef struct dev_ops *dev_ops_t;
/*
* Routines for null entries.
*/
-extern int nulldev(); /* no operation - OK */
-extern int nodev(); /* no operation - error */
-extern vm_offset_t nomap(); /* no operation - error */
+extern int nulldev(void); /* no operation - OK */
+extern int nulldev_open(dev_t dev, int flag, io_req_t ior);
+extern void nulldev_close(dev_t dev, int flags);
+extern int nulldev_read(dev_t dev, io_req_t ior);
+extern int nulldev_write(dev_t dev, io_req_t ior);
+extern io_return_t nulldev_getstat(dev_t dev, int flavor, int *data, natural_t *count);
+extern io_return_t nulldev_setstat(dev_t dev, int flavor, int *data, natural_t count);
+extern io_return_t nulldev_portdeath(dev_t dev, mach_port_t port);
+extern int nodev(void); /* no operation - error */
+extern int nomap(dev_t dev, vm_offset_t off, int prot); /* no operation - error */
/*
* Flavor constants for d_dev_info routine
@@ -105,10 +120,5 @@ extern int dev_indirect_count;
di < &dev_indirect_list[dev_indirect_count]; \
di++)
-/*
- * Exported routine to set indirection.
- */
-extern void dev_set_indirect(char *, dev_ops_t, int);
-
#endif /* _DEVICE_CONF_H_ */
diff --git a/device/cons.c b/device/cons.c
index ceba7f24..b04621ae 100644
--- a/device/cons.c
+++ b/device/cons.c
@@ -30,7 +30,7 @@
#ifdef MACH_KMSG
#include <device/io_req.h>
#include <device/kmsg.h>
-#endif
+#endif /* MACH_KMSG */
static boolean_t cn_inited = FALSE;
static struct consdev *cn_tab = 0; /* physical console device info */
@@ -42,8 +42,8 @@ static struct consdev *cn_tab = 0; /* physical console device info */
* is enabled. This can be useful to debug (or catch panics from) code early
* in the bootstrap procedure.
*/
-int (*romgetc)() = 0;
-void (*romputc)() = 0;
+int (*romgetc)(char c) = 0;
+void (*romputc)(char c) = 0;
#if CONSBUFSIZE > 0
/*
@@ -55,10 +55,10 @@ void (*romputc)() = 0;
static char consbuf[CONSBUFSIZE] = { 0 };
static char *consbp = consbuf;
static boolean_t consbufused = FALSE;
-#endif
+#endif /* CONSBUFSIZE > 0 */
void
-cninit()
+cninit(void)
{
struct consdev *cp;
dev_ops_t cn_ops;
@@ -108,7 +108,7 @@ cninit()
} while (cbp != consbp);
consbufused = FALSE;
}
-#endif
+#endif /* CONSBUFSIZE > 0 */
cn_inited = TRUE;
return;
}
@@ -120,7 +120,7 @@ cninit()
int
-cngetc()
+cngetc(void)
{
if (cn_tab)
return ((*cn_tab->cn_getc)(cn_tab->cn_dev, 1));
@@ -130,7 +130,7 @@ cngetc()
}
int
-cnmaygetc()
+cnmaygetc(void)
{
if (cn_tab)
return((*cn_tab->cn_getc)(cn_tab->cn_dev, 0));
@@ -180,5 +180,5 @@ cnputc(c)
if (consbp >= &consbuf[CONSBUFSIZE])
consbp = consbuf;
}
-#endif
+#endif /* CONSBUFSIZE > 0 */
}
diff --git a/device/cons.h b/device/cons.h
index 8ac796cd..34f3bc56 100644
--- a/device/cons.h
+++ b/device/cons.h
@@ -54,4 +54,15 @@ extern int cngetc(void);
extern int cnmaygetc(void);
extern void cnputc(char);
+
+/*
+ * ROM getc/putc primitives.
+ * On some architectures, the boot ROM provides basic character input/output
+ * routines that can be used before devices are configured or virtual memory
+ * is enabled. This can be useful to debug (or catch panics from) code early
+ * in the bootstrap procedure.
+ */
+extern int (*romgetc)(char c);
+extern void (*romputc)(char c);
+
#endif /* _DEVICE_CONS_H */
diff --git a/device/dev_hdr.h b/device/dev_hdr.h
index 340a2db1..ff7d2ef5 100644
--- a/device/dev_hdr.h
+++ b/device/dev_hdr.h
@@ -134,16 +134,16 @@ boolean_t dev_map(boolean_t (*)(), mach_port_t);
* device name lookup
*/
extern boolean_t dev_name_lookup(
- char * name,
- dev_ops_t *ops, /* out */
- int *unit); /* out */
+ char * name,
+ dev_ops_t *ops, /* out */
+ int *unit); /* out */
/*
* Change an entry in the indirection list.
*/
extern void dev_set_indirection(
- char *name,
+ const char *name,
dev_ops_t ops,
- int unit);
+ int unit);
#endif /* _DEVICE_DEV_HDR_H_ */
diff --git a/device/dev_lookup.c b/device/dev_lookup.c
index 01317b9b..9af7508c 100644
--- a/device/dev_lookup.c
+++ b/device/dev_lookup.c
@@ -43,6 +43,7 @@
#include <kern/ipc_kobject.h>
#include <device/device_emul.h>
+#include <device/ds_routines.h>
/*
* Device structure routines: reference counting, port->device.
@@ -70,7 +71,7 @@ struct kmem_cache dev_hdr_cache;
*/
void
dev_number_enter(device)
- mach_device_t device;
+ const mach_device_t device;
{
queue_t q;
@@ -84,7 +85,7 @@ dev_number_enter(device)
*/
void
dev_number_remove(device)
- mach_device_t device;
+ const mach_device_t device;
{
queue_t q;
@@ -98,7 +99,7 @@ dev_number_remove(device)
*/
mach_device_t
dev_number_lookup(ops, devnum)
- dev_ops_t ops;
+ const dev_ops_t ops;
int devnum;
{
queue_t q;
@@ -119,8 +120,7 @@ dev_number_lookup(ops, devnum)
* table.
*/
mach_device_t
-device_lookup(name)
- char * name;
+device_lookup(char *name)
{
dev_ops_t dev_ops;
int dev_minor;
@@ -197,8 +197,7 @@ device_lookup(name)
* Add a reference to the device.
*/
void
-mach_device_reference(device)
- mach_device_t device;
+mach_device_reference(mach_device_t device)
{
simple_lock(&device->ref_lock);
device->ref_count++;
@@ -210,8 +209,7 @@ mach_device_reference(device)
* structure if no references are left.
*/
void
-mach_device_deallocate(device)
- mach_device_t device;
+mach_device_deallocate(mach_device_t device)
{
simple_lock(&device->ref_lock);
if (--device->ref_count > 0) {
@@ -242,15 +240,12 @@ mach_device_deallocate(device)
/*
* port-to-device lookup routines.
*/
-decl_simple_lock_data(,
- dev_port_lock)
/*
* Enter a port-to-device mapping.
*/
void
-dev_port_enter(device)
- mach_device_t device;
+dev_port_enter(mach_device_t device)
{
mach_device_reference(device);
@@ -268,8 +263,7 @@ dev_port_enter(device)
* Remove a port-to-device mapping.
*/
void
-dev_port_remove(device)
- mach_device_t device;
+dev_port_remove(mach_device_t device)
{
ipc_kobject_set(device->port, IKO_NULL, IKOT_NONE);
mach_device_deallocate(device);
@@ -280,8 +274,7 @@ dev_port_remove(device)
* Doesn't consume the naked send right; produces a device reference.
*/
device_t
-dev_port_lookup(port)
- ipc_port_t port;
+dev_port_lookup(ipc_port_t port)
{
device_t device;
@@ -307,7 +300,7 @@ dev_port_lookup(port)
*/
ipc_port_t
convert_device_to_port(device)
- device_t device;
+ const device_t device;
{
if (device == DEVICE_NULL)
return IP_NULL;
@@ -322,9 +315,9 @@ convert_device_to_port(device)
* return FALSE.
*/
boolean_t
-dev_map(routine, port)
- boolean_t (*routine)();
- mach_port_t port;
+dev_map(
+ boolean_t (*routine)(),
+ mach_port_t port)
{
int i;
queue_t q;
@@ -363,7 +356,7 @@ dev_map(routine, port)
* Initialization
*/
void
-dev_lookup_init()
+dev_lookup_init(void)
{
int i;
@@ -372,8 +365,6 @@ dev_lookup_init()
for (i = 0; i < NDEVHASH; i++)
queue_init(&dev_number_hash_table[i]);
- simple_lock_init(&dev_port_lock);
-
kmem_cache_init(&dev_hdr_cache, "mach_device",
- sizeof(struct mach_device), 0, NULL, NULL, NULL, 0);
+ sizeof(struct mach_device), 0, NULL, 0);
}
diff --git a/device/dev_master.h b/device/dev_master.h
index 964ae828..70d4c63f 100644
--- a/device/dev_master.h
+++ b/device/dev_master.h
@@ -30,11 +30,14 @@
* Bind an IO operation to the master CPU.
*/
+#ifndef _DEVICE_DEV_MASTER_H_
+#define _DEVICE_DEV_MASTER_H_
+
#include <cpus.h>
#if NCPUS > 1
-#include <kern/macro_help.h>
+#include <kern/macros.h>
#include <kern/cpu_number.h>
#include <kern/sched_prim.h>
#include <kern/thread.h>
@@ -58,3 +61,5 @@
#define io_release_master()
#endif NCPUS > 1
+
+#endif /* _DEVICE_DEV_MASTER_H_ */
diff --git a/device/dev_name.c b/device/dev_name.c
index de9e360b..175e3890 100644
--- a/device/dev_name.c
+++ b/device/dev_name.c
@@ -39,18 +39,52 @@
/*
* Routines placed in empty entries in the device tables
*/
-int nulldev()
+int nulldev(void)
{
return (D_SUCCESS);
}
-int nodev()
+int nulldev_open(dev_t dev, int flags, io_req_t ior)
+{
+ return (D_SUCCESS);
+}
+
+void nulldev_close(dev_t dev, int flags)
+{
+}
+
+int nulldev_read(dev_t dev, io_req_t ior)
+{
+ return (D_SUCCESS);
+}
+
+int nulldev_write(dev_t dev, io_req_t ior)
+{
+ return (D_SUCCESS);
+}
+
+io_return_t nulldev_getstat(dev_t dev, int flavor, int *data, natural_t *count)
+{
+ return (D_SUCCESS);
+}
+
+io_return_t nulldev_setstat(dev_t dev, int flavor, int *data, natural_t count)
+{
+ return (D_SUCCESS);
+}
+
+int nulldev_portdeath(dev_t dev, mach_port_t port)
+{
+ return (D_SUCCESS);
+}
+
+int nodev(void)
{
return (D_INVALID_OPERATION);
}
-vm_offset_t
-nomap()
+int
+nomap(dev_t dev, vm_offset_t off, int prot)
{
return (D_INVALID_OPERATION);
}
@@ -63,11 +97,11 @@ nomap()
* src and target are equal in first 'len' characters
* next character of target is 0 (end of string).
*/
-boolean_t
+boolean_t __attribute__ ((pure))
name_equal(src, len, target)
- char *src;
- int len;
- char *target;
+ const char *src;
+ int len;
+ const char *target;
{
while (--len >= 0)
if (*src++ != *target++)
@@ -78,10 +112,10 @@ name_equal(src, len, target)
/*
* device name lookup
*/
-boolean_t dev_name_lookup(name, ops, unit)
- char *name;
- dev_ops_t *ops; /* out */
- int *unit; /* out */
+boolean_t dev_name_lookup(
+ char *name,
+ dev_ops_t *ops, /* out */
+ int *unit) /* out */
{
/*
* Assume that block device names are of the form
@@ -173,7 +207,7 @@ boolean_t dev_name_lookup(name, ops, unit)
}
*unit += (slice_num << 4);
- /* if slice==0, it is either compatability or whole device */
+ /* if slice==0, it is either compatibility or whole device */
if (c >= 'a' && c < 'a' + j) { /* note: w/o this -> whole slice */
/*
@@ -191,7 +225,7 @@ boolean_t dev_name_lookup(name, ops, unit)
*/
void
dev_set_indirection(name, ops, unit)
- char *name;
+ const char *name;
dev_ops_t ops;
int unit;
{
@@ -207,9 +241,9 @@ dev_set_indirection(name, ops, unit)
}
boolean_t dev_change_indirect(iname, dname, unit)
- char *iname;
- char *dname;
- int unit;
+ const char *iname;
+ const char *dname;
+ int unit;
{
struct dev_ops *dp;
struct dev_indirect *di;
diff --git a/device/dev_pager.c b/device/dev_pager.c
index 1a60045c..40331706 100644
--- a/device/dev_pager.c
+++ b/device/dev_pager.c
@@ -55,9 +55,8 @@
#include <device/dev_hdr.h>
#include <device/io_req.h>
#include <device/memory_object_reply.user.h>
-
-extern vm_offset_t block_io_mmap(); /* dummy routine to allow
- mmap for block devices */
+#include <device/dev_pager.h>
+#include <device/blkio.h>
/*
* The device pager routines are called directly from the message
@@ -174,15 +173,15 @@ void dev_pager_hash_init(void)
size = sizeof(struct dev_pager_entry);
kmem_cache_init(&dev_pager_hash_cache, "dev_pager_entry", size, 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
for (i = 0; i < DEV_PAGER_HASH_COUNT; i++)
queue_init(&dev_pager_hashtable[i]);
simple_lock_init(&dev_pager_hash_lock);
}
void dev_pager_hash_insert(
- ipc_port_t name_port,
- dev_pager_t rec)
+ const ipc_port_t name_port,
+ const dev_pager_t rec)
{
dev_pager_entry_t new_entry;
@@ -196,7 +195,7 @@ void dev_pager_hash_insert(
simple_unlock(&dev_pager_hash_lock);
}
-void dev_pager_hash_delete(ipc_port_t name_port)
+void dev_pager_hash_delete(const ipc_port_t name_port)
{
queue_t bucket;
dev_pager_entry_t entry;
@@ -217,7 +216,7 @@ void dev_pager_hash_delete(ipc_port_t name_port)
kmem_cache_free(&dev_pager_hash_cache, (vm_offset_t)entry);
}
-dev_pager_t dev_pager_hash_lookup(ipc_port_t name_port)
+dev_pager_t dev_pager_hash_lookup(const ipc_port_t name_port)
{
queue_t bucket;
dev_pager_entry_t entry;
@@ -241,11 +240,11 @@ dev_pager_t dev_pager_hash_lookup(ipc_port_t name_port)
}
kern_return_t device_pager_setup(
- mach_device_t device,
- int prot,
- vm_offset_t offset,
- vm_size_t size,
- mach_port_t *pager)
+ const mach_device_t device,
+ int prot,
+ vm_offset_t offset,
+ vm_size_t size,
+ mach_port_t *pager)
{
dev_pager_t d;
@@ -318,16 +317,12 @@ void device_pager_release(memory_object_t object)
boolean_t device_pager_debug = FALSE;
-boolean_t device_pager_data_request_done(); /* forward */
-boolean_t device_pager_data_write_done(); /* forward */
-
-
kern_return_t device_pager_data_request(
- ipc_port_t pager,
- ipc_port_t pager_request,
- vm_offset_t offset,
- vm_size_t length,
- vm_prot_t protection_required)
+ const ipc_port_t pager,
+ const ipc_port_t pager_request,
+ vm_offset_t offset,
+ vm_size_t length,
+ vm_prot_t protection_required)
{
dev_pager_t ds;
@@ -335,7 +330,7 @@ kern_return_t device_pager_data_request(
printf("(device_pager)data_request: pager=%p, offset=0x%lx, length=0x%x\n",
pager, offset, length);
- ds = dev_pager_hash_lookup((ipc_port_t)pager);
+ ds = dev_pager_hash_lookup(pager);
if (ds == DEV_PAGER_NULL)
panic("(device_pager)data_request: lookup failed");
@@ -344,7 +339,6 @@ kern_return_t device_pager_data_request(
if (ds->type == CHAR_PAGER_TYPE) {
vm_object_t object;
- vm_offset_t device_map_page(void *, vm_offset_t);
object = vm_object_lookup(pager_request);
if (object == VM_OBJECT_NULL) {
@@ -357,7 +351,7 @@ kern_return_t device_pager_data_request(
vm_object_page_map(object,
offset, length,
- device_map_page, (char *)ds);
+ device_map_page, (void *)ds);
vm_object_deallocate(object);
}
@@ -422,13 +416,13 @@ boolean_t device_pager_data_request_done(io_req_t ior)
if (device_pager_debug)
printf("(device_pager)data_request_done: r: 0x%lx\n", ior->io_residual);
memset((&ior->io_data[ior->io_count - ior->io_residual]), 0,
- (unsigned) ior->io_residual);
+ (size_t) ior->io_residual);
}
} else {
size_read = ior->io_count - ior->io_residual;
}
- start_alloc = trunc_page((vm_offset_t)ior->io_data);
+ start_alloc = trunc_page(ior->io_data);
end_alloc = start_alloc + round_page(ior->io_alloc_size);
if (ior->io_error == D_SUCCESS) {
@@ -463,8 +457,8 @@ boolean_t device_pager_data_request_done(io_req_t ior)
}
kern_return_t device_pager_data_write(
- ipc_port_t pager,
- ipc_port_t pager_request,
+ const ipc_port_t pager,
+ const ipc_port_t pager_request,
vm_offset_t offset,
pointer_t addr,
vm_size_t data_count)
@@ -476,7 +470,7 @@ kern_return_t device_pager_data_write(
panic("(device_pager)data_write: called");
- ds = dev_pager_hash_lookup((ipc_port_t)pager);
+ ds = dev_pager_hash_lookup(pager);
if (ds == DEV_PAGER_NULL)
panic("(device_pager)data_write: lookup failed");
@@ -519,8 +513,7 @@ kern_return_t device_pager_data_write(
return (KERN_SUCCESS);
}
-boolean_t device_pager_data_write_done(ior)
- io_req_t ior;
+boolean_t device_pager_data_write_done(io_req_t ior)
{
device_write_dealloc(ior);
mach_device_deallocate(ior->io_device);
@@ -529,19 +522,19 @@ boolean_t device_pager_data_write_done(ior)
}
kern_return_t device_pager_copy(
- ipc_port_t pager,
- ipc_port_t pager_request,
+ const ipc_port_t pager,
+ const ipc_port_t pager_request,
vm_offset_t offset,
vm_size_t length,
- ipc_port_t new_pager)
+ const ipc_port_t new_pager)
{
panic("(device_pager)copy: called");
}
kern_return_t
device_pager_supply_completed(
- ipc_port_t pager,
- ipc_port_t pager_request,
+ const ipc_port_t pager,
+ const ipc_port_t pager_request,
vm_offset_t offset,
vm_size_t length,
kern_return_t result,
@@ -552,8 +545,8 @@ device_pager_supply_completed(
kern_return_t
device_pager_data_return(
- ipc_port_t pager,
- ipc_port_t pager_request,
+ const ipc_port_t pager,
+ const ipc_port_t pager_request,
vm_offset_t offset,
pointer_t addr,
vm_size_t data_cnt,
@@ -565,7 +558,7 @@ device_pager_data_return(
kern_return_t
device_pager_change_completed(
- ipc_port_t pager,
+ const ipc_port_t pager,
boolean_t may_cache,
memory_object_copy_strategy_t copy_strategy)
{
@@ -590,10 +583,10 @@ vm_offset_t device_map_page(
}
kern_return_t device_pager_init_pager(
- ipc_port_t pager,
- ipc_port_t pager_request,
- ipc_port_t pager_name,
- vm_size_t pager_page_size)
+ const ipc_port_t pager,
+ const ipc_port_t pager_request,
+ const ipc_port_t pager_name,
+ vm_size_t pager_page_size)
{
dev_pager_t ds;
@@ -641,9 +634,9 @@ kern_return_t device_pager_init_pager(
}
kern_return_t device_pager_terminate(
- ipc_port_t pager,
- ipc_port_t pager_request,
- ipc_port_t pager_name)
+ const ipc_port_t pager,
+ const ipc_port_t pager_request,
+ const ipc_port_t pager_name)
{
dev_pager_t ds;
@@ -683,8 +676,8 @@ kern_return_t device_pager_terminate(
}
kern_return_t device_pager_data_unlock(
- ipc_port_t memory_object,
- ipc_port_t memory_control_port,
+ const ipc_port_t memory_object,
+ const ipc_port_t memory_control_port,
vm_offset_t offset,
vm_size_t length,
vm_prot_t desired_access)
@@ -694,10 +687,10 @@ kern_return_t device_pager_data_unlock(
}
kern_return_t device_pager_lock_completed(
- ipc_port_t memory_object,
- ipc_port_t pager_request_port,
- vm_offset_t offset,
- vm_size_t length)
+ const ipc_port_t memory_object,
+ const ipc_port_t pager_request_port,
+ vm_offset_t offset,
+ vm_size_t length)
{
panic("(device_pager)lock_completed: called");
return (KERN_FAILURE);
@@ -712,7 +705,7 @@ void device_pager_init(void)
*/
size = sizeof(struct dev_pager);
kmem_cache_init(&dev_pager_cache, "dev_pager", size, 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
/*
* Initialize the name port hashing stuff.
diff --git a/device/dev_pager.h b/device/dev_pager.h
new file mode 100644
index 00000000..7f97ee7e
--- /dev/null
+++ b/device/dev_pager.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _DEVICE_DEV_PAGER_H_
+#define _DEVICE_DEV_PAGER_H_
+
+vm_offset_t device_map_page(void *dsp, vm_offset_t offset);
+
+boolean_t device_pager_data_request_done(io_req_t ior);
+
+boolean_t device_pager_data_write_done(io_req_t ior);
+
+#endif /* _DEVICE_DEV_PAGER_H_ */
diff --git a/device/device_init.c b/device/device_init.c
index e43a2a95..794186ee 100644
--- a/device/device_init.c
+++ b/device/device_init.c
@@ -38,20 +38,15 @@
#include <device/device_types.h>
#include <device/device_port.h>
#include <device/tty.h>
+#include <device/ds_routines.h>
+#include <device/net_io.h>
+#include <device/chario.h>
-extern void mach_device_init();
-extern void dev_lookup_init();
-extern void net_io_init();
-extern void device_pager_init();
-
-extern void io_done_thread();
-extern void net_thread();
-
ipc_port_t master_device_port;
void
-device_service_create()
+device_service_create(void)
{
master_device_port = ipc_port_alloc_kernel();
if (master_device_port == IP_NULL)
diff --git a/device/device_init.h b/device/device_init.h
new file mode 100644
index 00000000..175b34d7
--- /dev/null
+++ b/device/device_init.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _DEVICE_DEVICE_INIT_H_
+#define _DEVICE_DEVICE_INIT_H_
+
+extern void device_service_create(void);
+
+#endif /* _DEVICE_DEVICE_INIT_H_ */
diff --git a/device/device_types_kernel.h b/device/device_types_kernel.h
index 87ce00f5..e17055c1 100644
--- a/device/device_types_kernel.h
+++ b/device/device_types_kernel.h
@@ -38,7 +38,6 @@
#include <mach/port.h>
#include <device/dev_hdr.h>
-extern device_t dev_port_lookup(ipc_port_t);
extern ipc_port_t convert_device_to_port(device_t);
#endif /* _DEVICE_DEVICE_TYPES_KERNEL_H_ */
diff --git a/device/ds_routines.c b/device/ds_routines.c
index ee575e5b..dbff7f89 100644
--- a/device/ds_routines.c
+++ b/device/ds_routines.c
@@ -99,7 +99,7 @@
extern struct device_emulation_ops linux_block_emulation_ops;
#ifdef CONFIG_INET
extern struct device_emulation_ops linux_net_emulation_ops;
-extern void free_skbuffs ();
+extern void free_skbuffs (void);
#ifdef CONFIG_PCMCIA
extern struct device_emulation_ops linux_pcmcia_emulation_ops;
#endif /* CONFIG_PCMCIA */
@@ -417,12 +417,11 @@ mach_convert_device_to_port (mach_device_t device)
}
static io_return_t
-device_open(reply_port, reply_port_type, mode, name, device_p)
- ipc_port_t reply_port;
- mach_msg_type_name_t reply_port_type;
- dev_mode_t mode;
- char * name;
- device_t *device_p; /* out */
+device_open(const ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type,
+ dev_mode_t mode,
+ char * name,
+ device_t *device_p)
{
mach_device_t device;
kern_return_t result;
@@ -537,8 +536,7 @@ device_open(reply_port, reply_port_type, mode, name, device_p)
}
boolean_t
-ds_open_done(ior)
- io_req_t ior;
+ds_open_done(const io_req_t ior)
{
kern_return_t result;
mach_device_t device;
@@ -597,9 +595,10 @@ ds_open_done(ior)
}
static io_return_t
-device_close(device)
- mach_device_t device;
+device_close(void *dev)
{
+ mach_device_t device = dev;
+
device_lock(device);
/*
@@ -639,7 +638,7 @@ device_close(device)
/*
* Close the device
*/
- (*device->dev_ops->d_close)(device->dev_number);
+ (*device->dev_ops->d_close)(device->dev_number, 0);
/*
* Finally mark it closed. If someone else is trying
@@ -660,17 +659,16 @@ device_close(device)
* Write to a device.
*/
static io_return_t
-device_write(device, reply_port, reply_port_type, mode, recnum,
- data, data_count, bytes_written)
- mach_device_t device;
- ipc_port_t reply_port;
- mach_msg_type_name_t reply_port_type;
- dev_mode_t mode;
- recnum_t recnum;
- io_buf_ptr_t data;
- unsigned int data_count;
- int *bytes_written; /* out */
+device_write(void *dev,
+ const ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type,
+ dev_mode_t mode,
+ recnum_t recnum,
+ const io_buf_ptr_t data,
+ unsigned int data_count,
+ int *bytes_written)
{
+ mach_device_t device = dev;
io_req_t ior;
io_return_t result;
@@ -751,17 +749,16 @@ device_write(device, reply_port, reply_port_type, mode, recnum,
* Write to a device, but memory is in message.
*/
static io_return_t
-device_write_inband(device, reply_port, reply_port_type, mode, recnum,
- data, data_count, bytes_written)
- mach_device_t device;
- ipc_port_t reply_port;
- mach_msg_type_name_t reply_port_type;
- dev_mode_t mode;
- recnum_t recnum;
- io_buf_ptr_inband_t data;
- unsigned int data_count;
- int *bytes_written; /* out */
+device_write_inband(void *dev,
+ const ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type,
+ dev_mode_t mode,
+ recnum_t recnum,
+ io_buf_ptr_inband_t data,
+ unsigned int data_count,
+ int *bytes_written)
{
+ mach_device_t device = dev;
io_req_t ior;
io_return_t result;
@@ -824,9 +821,9 @@ device_write_inband(device, reply_port, reply_port_type, mode, recnum,
* Wire down incoming memory to give to device.
*/
kern_return_t
-device_write_get(ior, wait)
- io_req_t ior;
- boolean_t *wait;
+device_write_get(
+ io_req_t ior,
+ boolean_t *wait)
{
vm_map_copy_t io_copy;
vm_offset_t new_addr;
@@ -919,8 +916,7 @@ device_write_get(ior, wait)
* Clean up memory allocated for IO.
*/
boolean_t
-device_write_dealloc(ior)
- io_req_t ior;
+device_write_dealloc(io_req_t ior)
{
vm_map_copy_t new_copy = VM_MAP_COPY_NULL;
vm_map_copy_t io_copy;
@@ -1020,8 +1016,7 @@ device_write_dealloc(ior)
* Send write completion message to client, and discard the data.
*/
boolean_t
-ds_write_done(ior)
- io_req_t ior;
+ds_write_done(const io_req_t ior)
{
/*
* device_write_dealloc discards the data that has been
@@ -1066,17 +1061,16 @@ ds_write_done(ior)
* Read from a device.
*/
static io_return_t
-device_read(device, reply_port, reply_port_type, mode, recnum,
- bytes_wanted, data, data_count)
- mach_device_t device;
- ipc_port_t reply_port;
- mach_msg_type_name_t reply_port_type;
- dev_mode_t mode;
- recnum_t recnum;
- int bytes_wanted;
- io_buf_ptr_t *data; /* out */
- unsigned int *data_count; /* out */
+device_read(void *dev,
+ const ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type,
+ dev_mode_t mode,
+ recnum_t recnum,
+ int bytes_wanted,
+ io_buf_ptr_t *data,
+ unsigned int *data_count)
{
+ mach_device_t device = dev;
io_req_t ior;
io_return_t result;
@@ -1143,17 +1137,16 @@ device_read(device, reply_port, reply_port_type, mode, recnum,
* Read from a device, but return the data 'inband.'
*/
static io_return_t
-device_read_inband(device, reply_port, reply_port_type, mode, recnum,
- bytes_wanted, data, data_count)
- mach_device_t device;
- ipc_port_t reply_port;
- mach_msg_type_name_t reply_port_type;
- dev_mode_t mode;
- recnum_t recnum;
- int bytes_wanted;
- char *data; /* pointer to OUT array */
- unsigned int *data_count; /* out */
+device_read_inband(void *dev,
+ const ipc_port_t reply_port,
+ mach_msg_type_name_t reply_port_type,
+ dev_mode_t mode,
+ recnum_t recnum,
+ int bytes_wanted,
+ char *data,
+ unsigned int *data_count)
{
+ mach_device_t device = dev;
io_req_t ior;
io_return_t result;
@@ -1221,9 +1214,9 @@ device_read_inband(device, reply_port, reply_port_type, mode, recnum,
/*
* Allocate wired-down memory for device read.
*/
-kern_return_t device_read_alloc(ior, size)
- io_req_t ior;
- vm_size_t size;
+kern_return_t device_read_alloc(
+ io_req_t ior,
+ vm_size_t size)
{
vm_offset_t addr;
kern_return_t kr;
@@ -1250,8 +1243,7 @@ kern_return_t device_read_alloc(ior, size)
return (KERN_SUCCESS);
}
-boolean_t ds_read_done(ior)
- io_req_t ior;
+boolean_t ds_read_done(const io_req_t ior)
{
vm_offset_t start_data, end_data;
vm_offset_t start_sent, end_sent;
@@ -1274,9 +1266,9 @@ boolean_t ds_read_done(ior)
* Zero memory that the device did not fill.
*/
if (start_sent < start_data)
- memset((char *)start_sent, 0, start_data - start_sent);
+ memset((void *)start_sent, 0, start_data - start_sent);
if (end_sent > end_data)
- memset((char *)end_data, 0, end_sent - end_data);
+ memset((void *)end_data, 0, end_sent - end_data);
/*
@@ -1346,12 +1338,13 @@ boolean_t ds_read_done(ior)
}
static io_return_t
-device_set_status(device, flavor, status, status_count)
- mach_device_t device;
- dev_flavor_t flavor;
- dev_status_t status;
- mach_msg_type_number_t status_count;
+device_set_status(
+ void *dev,
+ dev_flavor_t flavor,
+ dev_status_t status,
+ mach_msg_type_number_t status_count)
{
+ mach_device_t device = dev;
if (device->state != DEV_STATE_OPEN)
return (D_NO_SUCH_DEVICE);
@@ -1364,12 +1357,13 @@ device_set_status(device, flavor, status, status_count)
}
io_return_t
-mach_device_get_status(device, flavor, status, status_count)
- mach_device_t device;
- dev_flavor_t flavor;
- dev_status_t status; /* pointer to OUT array */
- mach_msg_type_number_t *status_count; /* out */
+mach_device_get_status(
+ void *dev,
+ dev_flavor_t flavor,
+ dev_status_t status, /* pointer to OUT array */
+ mach_msg_type_number_t *status_count) /* out */
{
+ mach_device_t device = dev;
if (device->state != DEV_STATE_OPEN)
return (D_NO_SUCH_DEVICE);
@@ -1382,13 +1376,13 @@ mach_device_get_status(device, flavor, status, status_count)
}
static io_return_t
-device_set_filter(device, receive_port, priority, filter, filter_count)
- mach_device_t device;
- ipc_port_t receive_port;
- int priority;
- filter_t filter[]; /* pointer to IN array */
- unsigned int filter_count;
+device_set_filter(void *dev,
+ const ipc_port_t receive_port,
+ int priority,
+ filter_t filter[],
+ unsigned int filter_count)
{
+ mach_device_t device = dev;
if (device->state != DEV_STATE_OPEN)
return (D_NO_SUCH_DEVICE);
@@ -1408,14 +1402,15 @@ device_set_filter(device, receive_port, priority, filter, filter_count)
}
static io_return_t
-device_map(device, protection, offset, size, pager, unmap)
- mach_device_t device;
- vm_prot_t protection;
- vm_offset_t offset;
- vm_size_t size;
- ipc_port_t *pager; /* out */
- boolean_t unmap; /* ? */
+device_map(
+ void *dev,
+ vm_prot_t protection,
+ vm_offset_t offset,
+ vm_size_t size,
+ ipc_port_t *pager, /* out */
+ boolean_t unmap) /* ? */
{
+ mach_device_t device = dev;
if (protection & ~VM_PROT_ALL)
return (KERN_INVALID_ARGUMENT);
@@ -1432,8 +1427,7 @@ device_map(device, protection, offset, size, pager, unmap)
* Doesn't do anything (yet).
*/
static void
-ds_no_senders(notification)
- mach_no_senders_notification_t *notification;
+ds_no_senders(mach_no_senders_notification_t *notification)
{
printf("ds_no_senders called! device_port=0x%lx count=%d\n",
notification->not_header.msgh_remote_port,
@@ -1445,8 +1439,7 @@ decl_simple_lock_data(, io_done_list_lock)
#define splio splsched /* XXX must block ALL io devices */
-void iodone(ior)
- io_req_t ior;
+void iodone(io_req_t ior)
{
spl_t s;
@@ -1479,7 +1472,7 @@ void iodone(ior)
splx(s);
}
-void io_done_thread_continue()
+void __attribute__ ((noreturn)) io_done_thread_continue(void)
{
for (;;) {
spl_t s;
@@ -1514,7 +1507,7 @@ void io_done_thread_continue()
}
}
-void io_done_thread()
+void io_done_thread(void)
{
/*
* Set thread privileges and highest priority.
@@ -1531,7 +1524,7 @@ void io_done_thread()
static void mach_device_trap_init(void); /* forward */
-void mach_device_init()
+void mach_device_init(void)
{
vm_offset_t device_io_min, device_io_max;
@@ -1561,13 +1554,12 @@ void mach_device_init()
device_io_map->wait_for_space = TRUE;
kmem_cache_init(&io_inband_cache, "io_buf_ptr_inband",
- sizeof(io_buf_ptr_inband_t), 0, NULL, NULL, NULL, 0);
+ sizeof(io_buf_ptr_inband_t), 0, NULL, 0);
mach_device_trap_init();
}
-void iowait(ior)
- io_req_t ior;
+void iowait(io_req_t ior)
{
spl_t s;
@@ -1606,7 +1598,7 @@ static void
mach_device_trap_init(void)
{
kmem_cache_init(&io_trap_cache, "io_req", IOTRAP_REQSIZE, 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
}
/*
@@ -1617,7 +1609,7 @@ mach_device_trap_init(void)
* Could call a device-specific routine.
*/
io_req_t
-ds_trap_req_alloc(mach_device_t device, vm_size_t data_size)
+ds_trap_req_alloc(const mach_device_t device, vm_size_t data_size)
{
return (io_req_t) kmem_cache_alloc(&io_trap_cache);
}
@@ -1626,7 +1618,7 @@ ds_trap_req_alloc(mach_device_t device, vm_size_t data_size)
* Called by iodone to release ior.
*/
boolean_t
-ds_trap_write_done(io_req_t ior)
+ds_trap_write_done(const io_req_t ior)
{
mach_device_t dev;
@@ -1688,7 +1680,7 @@ device_write_trap (mach_device_t device, dev_mode_t mode,
* Copy the data from user space.
*/
if (data_count > 0)
- copyin((char *)data, (char *)ior->io_data, data_count);
+ copyin((void *)data, ior->io_data, data_count);
/*
* The ior keeps an extra reference for the device.
@@ -1735,8 +1727,8 @@ device_writev_trap (mach_device_t device, dev_mode_t mode,
*/
if (iocount > 16)
return KERN_INVALID_VALUE; /* lame */
- copyin((char *)iovec,
- (char *)stack_iovec,
+ copyin(iovec,
+ stack_iovec,
iocount * sizeof(io_buf_vec_t));
for (data_count = 0, i = 0; i < iocount; i++)
data_count += stack_iovec[i].count;
@@ -1774,8 +1766,8 @@ device_writev_trap (mach_device_t device, dev_mode_t mode,
p = (vm_offset_t) ior->io_data;
for (i = 0; i < iocount; i++) {
- copyin((char *) stack_iovec[i].data,
- (char *) p,
+ copyin((void *) stack_iovec[i].data,
+ (void *) p,
stack_iovec[i].count);
p += stack_iovec[i].count;
}
diff --git a/device/ds_routines.h b/device/ds_routines.h
index c4333f48..c0543cbc 100644
--- a/device/ds_routines.h
+++ b/device/ds_routines.h
@@ -49,7 +49,6 @@ kern_return_t device_read_alloc(io_req_t, vm_size_t);
kern_return_t device_write_get(io_req_t, boolean_t *);
boolean_t device_write_dealloc(io_req_t);
void device_reference(device_t);
-void device_deallocate(device_t);
boolean_t ds_notify(mach_msg_header_t *msg);
boolean_t ds_open_done(io_req_t);
@@ -59,10 +58,29 @@ boolean_t ds_write_done(io_req_t);
void iowait (io_req_t ior);
kern_return_t device_pager_setup(
- mach_device_t device,
- int prot,
- vm_offset_t offset,
- vm_size_t size,
- mach_port_t *pager);
+ const mach_device_t device,
+ int prot,
+ vm_offset_t offset,
+ vm_size_t size,
+ mach_port_t *pager);
+
+extern void mach_device_init(void);
+extern void dev_lookup_init(void);
+extern void device_pager_init(void);
+extern void io_done_thread(void) __attribute__ ((noreturn));
+
+io_return_t ds_device_write_trap(
+ device_t dev,
+ dev_mode_t mode,
+ recnum_t recnum,
+ vm_offset_t data,
+ vm_size_t count);
+
+io_return_t ds_device_writev_trap(
+ device_t dev,
+ dev_mode_t mode,
+ recnum_t recnum,
+ io_buf_vec_t *iovec,
+ vm_size_t count);
#endif /* DS_ROUTINES_H */
diff --git a/device/if_ether.h b/device/if_ether.h
index 2ac938e2..91d4d9a6 100644
--- a/device/if_ether.h
+++ b/device/if_ether.h
@@ -45,13 +45,8 @@ struct ether_header {
u_short ether_type;
};
-#define ETHERMTU 1500
-#define ETHERMIN (60-14)
-
#ifdef KERNEL
-u_char etherbroadcastaddr[6];
-
-extern char * ether_sprintf(u_char *);
+extern char * ether_sprintf(const u_char *);
#endif /* KERNEL */
#endif /*_DEVICE_IF_ETHER_H_*/
diff --git a/device/io_req.h b/device/io_req.h
index 65e23e60..1ad46801 100644
--- a/device/io_req.h
+++ b/device/io_req.h
@@ -42,7 +42,7 @@
#include <device/device_types.h>
#include <device/dev_hdr.h>
-#include <kern/macro_help.h>
+#include <kern/macros.h>
/*
* IO request element, queued on device for delayed replies.
diff --git a/device/kmsg.c b/device/kmsg.c
index c8bd897a..c80775d9 100644
--- a/device/kmsg.c
+++ b/device/kmsg.c
@@ -42,11 +42,11 @@ static int kmsg_read_offset;
/* I/O request queue for blocking read */
static queue_head_t kmsg_read_queue;
/* Used for exclusive access to the device */
-static int kmsg_in_use;
+static boolean_t kmsg_in_use;
/* Used for exclusive access to the routines */
decl_simple_lock_data (static, kmsg_lock);
/* If already initialized or not */
-static int kmsg_init_done = 0;
+static boolean_t kmsg_init_done = FALSE;
/* Kernel Message Initializer */
static void
@@ -55,13 +55,13 @@ kmsginit (void)
kmsg_write_offset = 0;
kmsg_read_offset = 0;
queue_init (&kmsg_read_queue);
- kmsg_in_use = 0;
+ kmsg_in_use = FALSE;
simple_lock_init (&kmsg_lock);
}
/* Kernel Message Open Handler */
io_return_t
-kmsgopen (dev_t dev, int flag, io_req_t ior)
+kmsgopen (dev_t dev, int flag, const io_req_t ior)
{
simple_lock (&kmsg_lock);
if (kmsg_in_use)
@@ -70,21 +70,20 @@ kmsgopen (dev_t dev, int flag, io_req_t ior)
return D_ALREADY_OPEN;
}
- kmsg_in_use = 1;
+ kmsg_in_use = TRUE;
simple_unlock (&kmsg_lock);
return D_SUCCESS;
}
/* Kernel Message Close Handler */
-io_return_t
+void
kmsgclose (dev_t dev, int flag)
{
simple_lock (&kmsg_lock);
- kmsg_in_use = 0;
+ kmsg_in_use = FALSE;
simple_unlock (&kmsg_lock);
- return D_SUCCESS;
}
static boolean_t kmsg_read_done (io_req_t ior);
@@ -225,7 +224,7 @@ kmsg_putchar (int c)
if (!kmsg_init_done)
{
kmsginit ();
- kmsg_init_done = 1;
+ kmsg_init_done = TRUE;
}
simple_lock (&kmsg_lock);
diff --git a/device/kmsg.h b/device/kmsg.h
index b8c1f366..8d907f1b 100644
--- a/device/kmsg.h
+++ b/device/kmsg.h
@@ -8,7 +8,7 @@
#include <device/io_req.h>
io_return_t kmsgopen (dev_t dev, int flag, io_req_t ior);
-io_return_t kmsgclose (dev_t dev, int flag);
+void kmsgclose (dev_t dev, int flag);
io_return_t kmsgread (dev_t dev, io_req_t ior);
io_return_t kmsggetstat (dev_t dev, int flavor,
int *data, unsigned int *count);
diff --git a/device/net_io.c b/device/net_io.c
index fd71c024..99af0b29 100644
--- a/device/net_io.c
+++ b/device/net_io.c
@@ -144,7 +144,7 @@ vm_size_t net_kmsg_size; /* initialized below */
ipc_kmsg_t
net_kmsg_get(void)
{
- register ipc_kmsg_t kmsg;
+ ipc_kmsg_t kmsg;
spl_t s;
/*
@@ -196,7 +196,7 @@ net_kmsg_get(void)
}
void
-net_kmsg_put(register ipc_kmsg_t kmsg)
+net_kmsg_put(const ipc_kmsg_t kmsg)
{
spl_t s;
@@ -212,7 +212,7 @@ net_kmsg_put(register ipc_kmsg_t kmsg)
void
net_kmsg_collect(void)
{
- register ipc_kmsg_t kmsg;
+ ipc_kmsg_t kmsg;
spl_t s;
s = splimp();
@@ -238,7 +238,7 @@ net_kmsg_collect(void)
void
net_kmsg_more(void)
{
- register ipc_kmsg_t kmsg;
+ ipc_kmsg_t kmsg;
/*
* Replenish net kmsg pool if low. We don't have the locks
@@ -301,17 +301,13 @@ struct net_rcv_port {
filter_t filter[NET_MAX_FILTER];
/* filter operations */
};
-typedef struct net_rcv_port *net_rcv_port_t;
struct kmem_cache net_rcv_cache; /* cache of net_rcv_port structs */
-
#define NET_HASH_SIZE 256
#define N_NET_HASH 4
#define N_NET_HASH_KEYS 4
-unsigned int bpf_hash (int, unsigned int *);
-
/*
* A single hash entry.
*/
@@ -323,7 +319,6 @@ struct net_hash_entry {
int rcv_qlimit; /* qlimit for the port */
unsigned int keys[N_NET_HASH_KEYS];
};
-typedef struct net_hash_entry *net_hash_entry_t;
struct kmem_cache net_hash_entry_cache;
@@ -342,8 +337,6 @@ struct net_hash_header {
net_hash_entry_t table[NET_HASH_SIZE];
} filter_hash_header[N_NET_HASH];
-typedef struct net_hash_header *net_hash_header_t;
-
decl_simple_lock_data(,net_hash_header_lock)
#define HASH_ITERATE(head, elt) (elt) = (net_hash_entry_t) (head); do {
@@ -351,7 +344,6 @@ decl_simple_lock_data(,net_hash_header_lock)
(elt) = (net_hash_entry_t) queue_next((queue_entry_t) (elt)); \
} while ((elt) != (head));
-
#define FILTER_ITERATE(if_port_list, fp, nextfp, chain) \
for ((fp) = (net_rcv_port_t) queue_first(if_port_list); \
!queue_end(if_port_list, (queue_entry_t)(fp)); \
@@ -361,44 +353,10 @@ decl_simple_lock_data(,net_hash_header_lock)
/* entry_p must be net_rcv_port_t or net_hash_entry_t */
#define ENQUEUE_DEAD(dead, entry_p, chain) { \
- queue_next(&(entry_p)->chain) = (queue_entry_t) (dead); \
+ (entry_p)->chain.next = (queue_entry_t) (dead); \
(dead) = (queue_entry_t)(entry_p); \
}
-extern boolean_t net_do_filter(); /* CSPF */
-extern int bpf_do_filter(); /* BPF */
-
-int hash_ent_remove (
- struct ifnet *ifp,
- net_hash_header_t hp,
- int used,
- net_hash_entry_t *head,
- net_hash_entry_t entp,
- queue_entry_t *dead_p);
-
-void net_free_dead_infp (queue_entry_t dead_infp);
-void net_free_dead_entp (queue_entry_t dead_entp);
-
-int bpf_validate(
- bpf_insn_t f,
- int bytes,
- bpf_insn_t *match);
-
-int bpf_eq (
- bpf_insn_t f1,
- bpf_insn_t f2,
- register int bytes);
-
-int net_add_q_info (ipc_port_t rcv_port);
-
-int bpf_match (
- net_hash_header_t hash,
- int n_keys,
- unsigned int *keys,
- net_hash_entry_t **hash_headpp,
- net_hash_entry_t *entpp);
-
-
/*
* ethernet_priority:
*
@@ -411,9 +369,9 @@ int bpf_match (
*/
boolean_t ethernet_priority(kmsg)
- ipc_kmsg_t kmsg;
+ const ipc_kmsg_t kmsg;
{
- register unsigned char *addr =
+ unsigned char *addr =
(unsigned char *) net_kmsg(kmsg)->header;
/*
@@ -454,10 +412,9 @@ mach_msg_type_t packet_type = {
* Dequeues a message and delivers it at spl0.
* Returns FALSE if no messages.
*/
-boolean_t net_deliver(nonblocking)
- boolean_t nonblocking;
+boolean_t net_deliver(boolean_t nonblocking)
{
- register ipc_kmsg_t kmsg;
+ ipc_kmsg_t kmsg;
boolean_t high_priority;
struct ipc_kmsg_queue send_list;
@@ -510,6 +467,7 @@ boolean_t net_deliver(nonblocking)
/* remember message sizes must be rounded up */
kmsg->ikm_header.msgh_size =
(((mach_msg_size_t) (sizeof(struct net_rcv_msg)
+ - sizeof net_kmsg(kmsg)->sent
- NET_RCV_MAX + count)) + 3) &~ 3;
kmsg->ikm_header.msgh_local_port = MACH_PORT_NULL;
kmsg->ikm_header.msgh_kind = MACH_MSGH_KIND_NORMAL;
@@ -562,7 +520,7 @@ boolean_t net_deliver(nonblocking)
* net_kmsg_get will do a wakeup.
*/
-void net_ast()
+void net_ast(void)
{
spl_t s;
@@ -591,7 +549,7 @@ void net_ast()
(void) splx(s);
}
-void net_thread_continue()
+void __attribute__ ((noreturn)) net_thread_continue(void)
{
for (;;) {
spl_t s;
@@ -617,7 +575,7 @@ void net_thread_continue()
}
}
-void net_thread()
+void net_thread(void)
{
spl_t s;
@@ -645,10 +603,11 @@ void net_thread()
}
void
-reorder_queue(first, last)
- register queue_t first, last;
+reorder_queue(
+ queue_t first,
+ queue_t last)
{
- register queue_entry_t prev, next;
+ queue_entry_t prev, next;
prev = first->prev;
next = last->next;
@@ -668,11 +627,11 @@ reorder_queue(first, last)
* We are already at splimp.
*/
void
-net_packet(ifp, kmsg, count, priority)
- register struct ifnet *ifp;
- register ipc_kmsg_t kmsg;
- unsigned int count;
- boolean_t priority;
+net_packet(
+ struct ifnet *ifp,
+ ipc_kmsg_t kmsg,
+ unsigned int count,
+ boolean_t priority)
{
boolean_t awake;
@@ -681,7 +640,7 @@ net_packet(ifp, kmsg, count, priority)
* Do a quick check to see if it is a kernel TTD packet.
*
* Only check if KernelTTD is enabled, ie. the current
- * device driver supports TTD, and the bootp succeded.
+ * device driver supports TTD, and the bootp succeeded.
*/
if (kttd_enabled && kttd_handle_async(kmsg)) {
/*
@@ -732,12 +691,12 @@ int net_filter_queue_reorder = 0; /* non-zero to enable reordering */
*/
void
net_filter(kmsg, send_list)
- register ipc_kmsg_t kmsg;
+ const ipc_kmsg_t kmsg;
ipc_kmsg_queue_t send_list;
{
- register struct ifnet *ifp;
- register net_rcv_port_t infp, nextfp;
- register ipc_kmsg_t new_kmsg;
+ struct ifnet *ifp;
+ net_rcv_port_t infp, nextfp;
+ ipc_kmsg_t new_kmsg;
net_hash_entry_t entp, *hash_headp;
ipc_port_t dest;
@@ -857,7 +816,7 @@ net_filter(kmsg, send_list)
ipc_kmsg_enqueue(send_list, new_kmsg);
{
- register net_rcv_port_t prevfp;
+ net_rcv_port_t prevfp;
int rcount = ++infp->rcv_count;
/*
@@ -914,14 +873,14 @@ net_filter(kmsg, send_list)
boolean_t
net_do_filter(infp, data, data_count, header)
net_rcv_port_t infp;
- char * data;
+ const char * data;
unsigned int data_count;
- char * header;
+ const char * header;
{
int stack[NET_FILTER_STACK_DEPTH+1];
- register int *sp;
- register filter_t *fp, *fpe;
- register unsigned int op, arg;
+ int *sp;
+ filter_t *fp, *fpe;
+ unsigned int op, arg;
/*
* The filter accesses the header and data
@@ -1052,13 +1011,13 @@ net_do_filter(infp, data, data_count, header)
* Check filter for invalid operations or stack over/under-flow.
*/
boolean_t
-parse_net_filter(filter, count)
- register filter_t *filter;
- unsigned int count;
+parse_net_filter(
+ filter_t *filter,
+ unsigned int count)
{
- register int sp;
- register filter_t *fpe = &filter[count];
- register filter_t op, arg;
+ int sp;
+ filter_t *fpe = &filter[count];
+ filter_t op, arg;
/*
* count is at least 1, and filter[0] is used for flags.
@@ -1146,19 +1105,19 @@ parse_net_filter(filter, count)
* If we are successful, we must consume that right.
*/
io_return_t
-net_set_filter(ifp, rcv_port, priority, filter, filter_count)
- struct ifnet *ifp;
- ipc_port_t rcv_port;
- int priority;
- filter_t *filter;
- unsigned int filter_count;
+net_set_filter(
+ struct ifnet *ifp,
+ ipc_port_t rcv_port,
+ int priority,
+ filter_t *filter,
+ unsigned int filter_count)
{
int filter_bytes;
bpf_insn_t match;
- register net_rcv_port_t infp, my_infp;
+ net_rcv_port_t infp, my_infp;
net_rcv_port_t nextfp;
net_hash_header_t hhp;
- register net_hash_entry_t entp, hash_entp;
+ net_hash_entry_t entp;
net_hash_entry_t *head, nextentp;
queue_entry_t dead_infp, dead_entp;
int i;
@@ -1166,6 +1125,13 @@ net_set_filter(ifp, rcv_port, priority, filter, filter_count)
io_return_t rval;
boolean_t in, out;
+ /* Initialize hash_entp to NULL to quiet GCC
+ * warning about uninitialized variable. hash_entp is only
+ * used when match != 0; in that case it is properly initialized
+ * by kmem_cache_alloc().
+ */
+ net_hash_entry_t hash_entp = NULL;
+
/*
* Check the filter syntax.
*/
@@ -1404,16 +1370,16 @@ clean_and_return:
* Other network operations
*/
io_return_t
-net_getstat(ifp, flavor, status, count)
- struct ifnet *ifp;
- dev_flavor_t flavor;
- dev_status_t status; /* pointer to OUT array */
- natural_t *count; /* OUT */
+net_getstat(
+ struct ifnet *ifp,
+ dev_flavor_t flavor,
+ dev_status_t status, /* pointer to OUT array */
+ natural_t *count) /* OUT */
{
switch (flavor) {
case NET_STATUS:
{
- register struct net_status *ns = (struct net_status *)status;
+ struct net_status *ns = (struct net_status *)status;
if (*count < NET_STATUS_COUNT)
return (D_INVALID_OPERATION);
@@ -1431,9 +1397,9 @@ net_getstat(ifp, flavor, status, count)
}
case NET_ADDRESS:
{
- register int addr_byte_count;
- register int addr_int_count;
- register int i;
+ int addr_byte_count;
+ int addr_int_count;
+ int i;
addr_byte_count = ifp->if_address_size;
addr_int_count = (addr_byte_count + (sizeof(int)-1))
@@ -1454,7 +1420,7 @@ printf ("net_getstat: count: %d, addr_int_count: %d\n",
- addr_byte_count));
for (i = 0; i < addr_int_count; i++) {
- register int word;
+ int word;
word = status[i];
status[i] = htonl(word);
@@ -1469,10 +1435,10 @@ printf ("net_getstat: count: %d, addr_int_count: %d\n",
}
io_return_t
-net_write(ifp, start, ior)
- register struct ifnet *ifp;
- int (*start)();
- io_req_t ior;
+net_write(
+ struct ifnet *ifp,
+ int (*start)(),
+ io_req_t ior)
{
spl_t s;
kern_return_t rc;
@@ -1523,17 +1489,17 @@ net_write(ifp, start, ior)
* Initialize the whole package.
*/
void
-net_io_init()
+net_io_init(void)
{
- register vm_size_t size;
+ vm_size_t size;
size = sizeof(struct net_rcv_port);
kmem_cache_init(&net_rcv_cache, "net_rcv_port", size, 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
size = sizeof(struct net_hash_entry);
kmem_cache_init(&net_hash_entry_cache, "net_hash_entry", size, 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
size = ikm_plus_overhead(sizeof(struct net_rcv_msg));
net_kmsg_size = round_page(size);
@@ -1625,19 +1591,20 @@ net_io_init()
*/
int
-bpf_do_filter(infp, p, wirelen, header, hlen, hash_headpp, entpp)
- net_rcv_port_t infp;
- char * p; /* packet data */
- unsigned int wirelen; /* data_count (in bytes) */
- char * header;
- unsigned int hlen; /* header len (in bytes) */
- net_hash_entry_t **hash_headpp, *entpp; /* out */
+bpf_do_filter(
+ net_rcv_port_t infp,
+ char * p, /* packet data */
+ unsigned int wirelen, /* data_count (in bytes) */
+ char * header,
+ unsigned int hlen, /* header len (in bytes) */
+ net_hash_entry_t **hash_headpp,
+ net_hash_entry_t *entpp) /* out */
{
- register bpf_insn_t pc, pc_end;
- register unsigned int buflen;
+ bpf_insn_t pc, pc_end;
+ unsigned int buflen;
- register unsigned int A, X;
- register int k;
+ unsigned int A, X;
+ int k;
unsigned int mem[BPF_MEMWORDS];
/* Generic pointer to either HEADER or P according to the specified offset. */
@@ -1924,13 +1891,13 @@ bpf_do_filter(infp, p, wirelen, header, hlen, hash_headpp, entpp)
* Otherwise, a bogus program could easily crash the system.
*/
int
-bpf_validate(f, bytes, match)
- bpf_insn_t f;
- int bytes;
- bpf_insn_t *match;
+bpf_validate(
+ bpf_insn_t f,
+ int bytes,
+ bpf_insn_t *match)
{
- register int i, j, len;
- register bpf_insn_t p;
+ int i, j, len;
+ bpf_insn_t p;
len = BPF_BYTES2LEN(bytes);
@@ -1946,7 +1913,7 @@ bpf_validate(f, bytes, match)
*/
p = &f[i];
if (BPF_CLASS(p->code) == BPF_JMP) {
- register int from = i + 1;
+ int from = i + 1;
if (BPF_OP(p->code) == BPF_JA) {
if (from + p->k >= len)
@@ -1996,11 +1963,12 @@ bpf_validate(f, bytes, match)
}
int
-bpf_eq (f1, f2, bytes)
- register bpf_insn_t f1, f2;
- register int bytes;
+bpf_eq(
+ bpf_insn_t f1,
+ bpf_insn_t f2,
+ int bytes)
{
- register int count;
+ int count;
count = BPF_BYTES2LEN(bytes);
for (; count--; f1++, f2++) {
@@ -2016,10 +1984,10 @@ bpf_eq (f1, f2, bytes)
unsigned int
bpf_hash (n, keys)
- register int n;
- register unsigned int *keys;
+ int n;
+ const unsigned int *keys;
{
- register unsigned int hval = 0;
+ unsigned int hval = 0;
while (n--) {
hval += *keys++;
@@ -2031,12 +1999,12 @@ bpf_hash (n, keys)
int
bpf_match (hash, n_keys, keys, hash_headpp, entpp)
net_hash_header_t hash;
- register int n_keys;
- register unsigned int *keys;
+ int n_keys;
+ const unsigned int *keys;
net_hash_entry_t **hash_headpp, *entpp;
{
- register net_hash_entry_t head, entp;
- register int i;
+ net_hash_entry_t head, entp;
+ int i;
if (n_keys != hash->n_keys)
return FALSE;
@@ -2070,12 +2038,13 @@ bpf_match (hash, n_keys, keys, hash_headpp, entpp)
*/
int
-hash_ent_remove (ifp, hp, used, head, entp, dead_p)
- struct ifnet *ifp;
- net_hash_header_t hp;
- int used;
- net_hash_entry_t *head, entp;
- queue_entry_t *dead_p;
+hash_ent_remove(
+ struct ifnet *ifp,
+ net_hash_header_t hp,
+ int used,
+ net_hash_entry_t *head,
+ net_hash_entry_t entp,
+ queue_entry_t *dead_p)
{
hp->ref_count--;
@@ -2107,8 +2076,7 @@ hash_ent_remove (ifp, hp, used, head, entp, dead_p)
}
int
-net_add_q_info (rcv_port)
- ipc_port_t rcv_port;
+net_add_q_info(ipc_port_t rcv_port)
{
mach_port_msgcount_t qlimit = 0;
@@ -2133,8 +2101,7 @@ net_add_q_info (rcv_port)
}
void
-net_del_q_info (qlimit)
- int qlimit;
+net_del_q_info(int qlimit)
{
simple_lock(&net_kmsg_total_lock);
net_queue_free_min--;
@@ -2151,10 +2118,9 @@ net_del_q_info (qlimit)
* No locks should be held when called.
*/
void
-net_free_dead_infp (dead_infp)
- queue_entry_t dead_infp;
+net_free_dead_infp(queue_entry_t dead_infp)
{
- register net_rcv_port_t infp, nextfp;
+ net_rcv_port_t infp, nextfp;
for (infp = (net_rcv_port_t) dead_infp; infp != 0; infp = nextfp)
{
@@ -2173,10 +2139,9 @@ net_free_dead_infp (dead_infp)
* No locks should be held when called.
*/
void
-net_free_dead_entp (dead_entp)
- queue_entry_t dead_entp;
+net_free_dead_entp(queue_entry_t dead_entp)
{
- register net_hash_entry_t entp, nextentp;
+ net_hash_entry_t entp, nextentp;
for (entp = (net_hash_entry_t)dead_entp; entp != 0; entp = nextentp)
{
diff --git a/device/net_io.h b/device/net_io.h
index 5b3a55c6..d4e24d41 100644
--- a/device/net_io.h
+++ b/device/net_io.h
@@ -38,7 +38,7 @@
#include <mach/machine/vm_types.h>
#include <ipc/ipc_kmsg.h>
-#include <kern/macro_help.h>
+#include <kern/macros.h>
#include <kern/lock.h>
#include <kern/kalloc.h>
@@ -46,6 +46,15 @@
#include <device/io_req.h>
#include <device/net_status.h>
+struct net_rcv_port;
+typedef struct net_rcv_port *net_rcv_port_t;
+
+struct net_hash_entry;
+typedef struct net_hash_entry *net_hash_entry_t;
+
+struct net_hash_header;
+typedef struct net_hash_header *net_hash_header_t;
+
/*
* A network packet is wrapped in a kernel message while in
* the kernel.
@@ -65,7 +74,7 @@ extern void net_kmsg_put(ipc_kmsg_t);
* Network utility routines.
*/
-extern void net_ast();
+extern void net_ast(void);
extern void net_packet(struct ifnet *, ipc_kmsg_t, unsigned int, boolean_t);
extern void net_filter(ipc_kmsg_t, ipc_kmsg_queue_t);
extern io_return_t net_getstat(struct ifnet *, dev_flavor_t, dev_status_t,
@@ -80,6 +89,9 @@ extern vm_size_t net_kmsg_size;
extern void net_kmsg_collect (void);
+extern void net_io_init(void);
+extern void net_thread(void) __attribute__ ((noreturn));
+
#define net_kmsg_alloc() ((ipc_kmsg_t) kalloc(net_kmsg_size))
#define net_kmsg_free(kmsg) kfree((vm_offset_t) (kmsg), net_kmsg_size)
@@ -88,4 +100,53 @@ extern unsigned short int ntohs(unsigned short int);
extern unsigned int htonl(unsigned int);
extern unsigned short int htons(unsigned short int);
+unsigned int bpf_hash(int n, const unsigned int *keys);
+
+extern boolean_t
+net_do_filter(
+ net_rcv_port_t infp,
+ const char * data,
+ unsigned int data_count,
+ const char * header); /* CSPF */
+
+extern int
+bpf_do_filter(
+ net_rcv_port_t infp,
+ char * p,
+ unsigned int wirelen,
+ char * header,
+ unsigned int hlen,
+ net_hash_entry_t **hash_headpp,
+ net_hash_entry_t *entpp); /* BPF */
+
+int hash_ent_remove(
+ struct ifnet *ifp,
+ net_hash_header_t hp,
+ int used,
+ net_hash_entry_t *head,
+ net_hash_entry_t entp,
+ queue_entry_t *dead_p);
+
+void net_free_dead_infp(queue_entry_t dead_infp);
+void net_free_dead_entp (queue_entry_t dead_entp);
+
+int bpf_validate(
+ bpf_insn_t f,
+ int bytes,
+ bpf_insn_t *match);
+
+int bpf_eq(
+ bpf_insn_t f1,
+ bpf_insn_t f2,
+ int bytes);
+
+int net_add_q_info(ipc_port_t rcv_port);
+
+int bpf_match (
+ net_hash_header_t hash,
+ int n_keys,
+ const unsigned int *keys,
+ net_hash_entry_t **hash_headpp,
+ net_hash_entry_t *entpp);
+
#endif /* _DEVICE_NET_IO_H_ */
diff --git a/device/subrs.c b/device/subrs.c
index a82bae38..a10b72d7 100644
--- a/device/subrs.c
+++ b/device/subrs.c
@@ -40,32 +40,27 @@
/*
* Print out disk name and block number for hard disk errors.
*/
-void harderr(bp, cp)
- struct buf *bp;
- char * cp;
+void harderr(ior, cp)
+ const io_req_t ior;
+ const char * cp;
{
printf("%s%d%c: hard error sn%d ",
cp,
- minor(bp->b_dev) >> 3,
- 'a' + (minor(bp->b_dev) & 0x7),
- bp->b_blkno);
+ minor(ior->io_unit) >> 3,
+ 'a' + (minor(ior->io_unit) & 0x7),
+ ior->io_recnum);
}
/*
- * Ethernet support routines.
- */
-u_char etherbroadcastaddr[6] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
-
-/*
* Convert Ethernet address to printable (loggable) representation.
*/
char *
ether_sprintf(ap)
- register u_char *ap;
+ const u_char *ap;
{
- register int i;
+ int i;
static char etherbuf[18];
- register char *cp = etherbuf;
+ char *cp = etherbuf;
static char digits[] = "0123456789abcdef";
for (i = 0; i < 6; i++) {
@@ -80,8 +75,7 @@ ether_sprintf(ap)
/*
* Initialize send and receive queues on an interface.
*/
-void if_init_queues(ifp)
- register struct ifnet *ifp;
+void if_init_queues(struct ifnet *ifp)
{
IFQ_INIT(&ifp->if_snd);
queue_init(&ifp->if_rcv_port_list);
@@ -108,11 +102,11 @@ void wakeup(channel)
thread_wakeup((event_t) channel);
}
-struct buf *
+io_req_t
geteblk(size)
int size;
{
- register io_req_t ior;
+ io_req_t ior;
io_req_alloc(ior, 0);
ior->io_device = (mach_device_t)0;
@@ -133,11 +127,9 @@ geteblk(size)
return (ior);
}
-void brelse(bp)
- struct buf *bp;
+void brelse(ior)
+ io_req_t ior;
{
- register io_req_t ior = bp;
-
(void) vm_deallocate(kernel_map,
(vm_offset_t) ior->io_data,
ior->io_alloc_size);
diff --git a/device/tty.h b/device/tty.h
index dcc77119..d7aa2add 100644
--- a/device/tty.h
+++ b/device/tty.h
@@ -72,8 +72,8 @@ struct tty {
* Items beyond this point should be removed to device-specific
* extension structures.
*/
- int (*t_getstat)(); /* routine to get status */
- int (*t_setstat)(); /* routine to set status */
+ io_return_t (*t_getstat)(dev_t, int, int *, natural_t *); /* routine to get status */
+ io_return_t (*t_setstat)(dev_t, int, int *, natural_t); /* routine to set status */
dev_ops_t t_tops; /* another device to possibly
push through */
};
@@ -184,7 +184,7 @@ extern boolean_t tty_portdeath(
#define TS_TRANSLATE 0x00100000 /* translation device enabled */
#define TS_KDB 0x00200000 /* should enter kdb on ALT */
-#define TS_MIN_TO_RCV 0x00400000 /* character recived during
+#define TS_MIN_TO_RCV 0x00400000 /* character received during
receive timeout interval */
/* flags - old names defined in terms of new ones */
@@ -234,6 +234,4 @@ struct ldisc_switch {
extern struct ldisc_switch linesw[];
-extern void chario_init(void);
-
#endif /* _DEVICE_TTY_H_ */
diff --git a/doc/mach.texi b/doc/mach.texi
index 9ad9e70b..0aeed766 100644
--- a/doc/mach.texi
+++ b/doc/mach.texi
@@ -193,7 +193,7 @@ Port Manipulation Interface
* Receive Rights:: How to work with receive rights.
* Port Sets:: How to work with port sets.
* Request Notifications:: How to request notifications for events.
-@c * Inherited Ports:: How to work with the inherited system ports.
+* Inherited Ports:: How to work with the inherited system ports.
Virtual Memory Interface
@@ -1330,6 +1330,15 @@ which is conventionally used as a reply port by the recipient of the
message. The field must carry a send right, a send-once right,
@code{MACH_PORT_NULL}, or @code{MACH_PORT_DEAD}.
+@item unsigned long msgh_protected_payload
+The @code{msgh_protected_payload} field carries a payload that is set
+by the kernel during message delivery. The payload is an opaque
+identifier that can be used by the receiver to lookup the associated
+data structure.
+
+It is only valid in received messages. See @ref{Message Receive} for
+further information.
+
@item mach_port_seqno_t msgh_seqno
The @code{msgh_seqno} field provides a sequence number for the message.
It is only valid in received messages; its value in sent messages is
@@ -1417,6 +1426,7 @@ types are predefined:
@item MACH_MSG_TYPE_STRING
@item MACH_MSG_TYPE_STRING_C
@item MACH_MSG_TYPE_PORT_NAME
+@item MACH_MSG_TYPE_PROTECTED_PAYLOAD
@end table
The following predefined types specify port rights, and receive special
@@ -1435,6 +1445,11 @@ should be used in preference to @code{MACH_MSG_TYPE_INTEGER_32}.
@item MACH_MSG_TYPE_MAKE_SEND_ONCE
@end table
+The type @code{MACH_MSG_TYPE_PROTECTED_PAYLOAD} is used by the kernel
+to indicate that a delivered message carries a payload in the
+@code{msgh_protected_payload} field. See @ref{Message Receive} for
+more information.
+
@item msgt_size : 8
The @code{msgt_size} field specifies the size of each datum, in bits. For
example, the msgt_size of @code{MACH_MSG_TYPE_INTEGER_32} data is 32.
@@ -1934,6 +1949,25 @@ loses the receive right after the message was dequeued from it, then
right still exists, but isn't held by the caller, then
@code{msgh_local_port} specifies @code{MACH_PORT_NULL}.
+Servers usually associate some state with a receive right. To that
+end, they might use a hash table to look up the state for the port a
+message was sent to. To optimize this, a task may associate an opaque
+@var{payload} with a receive right using the
+@code{mach_port_set_protected_payload} function. Once this is done,
+the kernel will set the @code{msgh_protected_payload} field to
+@var{payload} when delivering a message to this right and indicate
+this by setting the local part of @code{msgh_bits} to
+@code{MACH_MSG_TYPE_PROTECTED_PAYLOAD}.
+
+The support for protected payloads was added to GNU Mach. To preserve
+binary compatibility, the @code{msgh_local_port} and
+@code{msgh_local_port} share the same location. This makes it
+possible to add the payload information without increasing the size of
+@code{mach_msg_header_t}. This is an implementation detail. Which
+field is valid is determined by the local part of the
+@code{msgh_bits}. Existing software is not affected. When a receive
+right is transferred to another task, its payload is cleared.
+
Received messages are stamped with a sequence number, taken from the
port from which the message was received. (Messages received from a
port set are stamped with a sequence number from the appropriate member
@@ -2164,7 +2198,7 @@ the kernel.
* Receive Rights:: How to work with receive rights.
* Port Sets:: How to work with port sets.
* Request Notifications:: How to request notifications for events.
-@c * Inherited Ports:: How to work with the inherited system ports.
+* Inherited Ports:: How to work with the inherited system ports.
@end menu
@@ -2715,6 +2749,41 @@ In addition to the normal diagnostic return codes from the call's server
(normally the kernel), the call may return @code{mach_msg} return codes.
@end deftypefun
+@deftypefun kern_return_t mach_port_set_protected_payload (@w{ipc_space_t @var{task}}, @w{mach_port_t @var{name}}, @w{unsigned long @var{payload}})
+The function @code{mach_port_set_protected_payload} sets the protected
+payload associated with the right @var{name} to @var{payload}.
+Section @ref{Message Receive} describes how setting a protected
+payload affects the messages delivered to @var{name}.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_TASK} if @var{task} was invalid,
+@code{KERN_INVALID_NAME} if @var{name} did not denote a right and
+@code{KERN_INVALID_RIGHT} if @var{name} denoted a right, but not a
+receive right.
+
+The @code{mach_port_set_protected_payload} call is actually an RPC to
+@var{task}, normally a send right for a task port, but potentially any
+send right. In addition to the normal diagnostic return codes from
+the call's server (normally the kernel), the call may return
+@code{mach_msg} return codes.
+@end deftypefun
+
+@deftypefun kern_return_t mach_port_clear_protected_payload (@w{ipc_space_t @var{task}}, @w{mach_port_t @var{name}}, @w{unsigned long @var{payload}})
+The function @code{mach_port_clear_protected_payload} clears the
+protected payload associated with the right @var{name}.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded,
+@code{KERN_INVALID_TASK} if @var{task} was invalid,
+@code{KERN_INVALID_NAME} if @var{name} did not denote a right and
+@code{KERN_INVALID_RIGHT} if @var{name} denoted a right, but not a
+receive right.
+
+The @code{mach_port_clear_protected_payload} call is actually an RPC
+to @var{task}, normally a send right for a task port, but potentially
+any send right. In addition to the normal diagnostic return codes
+from the call's server (normally the kernel), the call may return
+@code{mach_msg} return codes.
+@end deftypefun
@node Port Sets
@subsection Port Sets
@@ -2848,66 +2917,69 @@ call's server (normally the kernel), the call may return @code{mach_msg}
return codes.
@end deftypefun
-@c The inherited ports concept is not used in the Hurd,
-@c and so the _SLOT macros are not defined in GNU Mach.
-
-@c @node Inherited Ports
-@c @subsection Inherited Ports
-
-@c @deftypefun kern_return_t mach_ports_register (@w{task_t @var{target_task}, @w{port_array_t @var{init_port_set}}, @w{int @var{init_port_array_count}})
-@c @deftypefunx kern_return_t mach_ports_lookup (@w{task_t @var{target_task}, @w{port_array_t *@var{init_port_set}}, @w{int *@var{init_port_array_count}})
-@c @code{mach_ports_register} manipulates the inherited ports array,
-@c @code{mach_ports_lookup} is used to acquire specific parent ports.
-@c @var{target_task} is the task to be affected. @var{init_port_set} is an
-@c array of system ports to be registered, or returned. Although the array
-@c size is given as variable, the kernel will only accept a limited number
-@c of ports. @var{init_port_array_count} is the number of ports returned
-@c in @var{init_port_set}.
-
-@c @code{mach_ports_register} registers an array of well-known system ports
-@c with the kernel on behalf of a specific task. Currently the ports to be
-@c registered are: the port to the Network Name Server, the port to the
-@c Environment Manager, and a port to the Service server. These port
-@c values must be placed in specific slots in the init_port_set. The slot
-@c numbers are given by the global constants defined in @file{mach_init.h}:
-@c @code{NAME_SERVER_SLOT}, @code{ENVIRONMENT_SLOT}, and
-@c @code{SERVICE_SLOT}. These ports may later be retrieved with
-@c @code{mach_ports_lookup}.
-
-@c When a new task is created (see @code{task_create}), the child task will
-@c be given access to these ports. Only port send rights may be
-@c registered. Furthermore, the number of ports which may be registered is
-@c fixed and given by the global constant @code{MACH_PORT_SLOTS_USED}
-@c Attempts to register too many ports will fail.
-
-@c It is intended that this mechanism be used only for task initialization,
-@c and then only by runtime support modules. A parent task has three
-@c choices in passing these system ports to a child task. Most commonly it
-@c can do nothing and its child will inherit access to the same
-@c @var{init_port_set} that the parent has; or a parent task may register a
-@c set of ports it wishes to have passed to all of its children by calling
-@c @code{mach_ports_register} using its task port; or it may make necessary
-@c modifications to the set of ports it wishes its child to see, and then
-@c register those ports using the child's task port prior to starting the
-@c child's thread(s). The @code{mach_ports_lookup} call which is done by
-@c @code{mach_init} in the child task will acquire these initial ports for
-@c the child.
-
-@c Tasks other than the Network Name Server and the Environment Manager
-@c should not need access to the Service port. The Network Name Server port
-@c is the same for all tasks on a given machine. The Environment port is
-@c the only port likely to have different values for different tasks.
-
-@c Since the number of ports which may be registered is limited, ports
-@c other than those used by the runtime system to initialize a task should
-@c be passed to children either through an initial message, or through the
-@c Network Name Server for public ports, or the Environment Manager for
-@c private ports.
-
-@c The function returns @code{KERN_SUCCESS} if the memory was allocated,
-@c and @code{KERN_INVALID_ARGUMENT} if an attempt was made to register more
-@c ports than the current kernel implementation allows.
-@c @end deftypefun
+@node Inherited Ports
+@subsection Inherited Ports
+
+The inherited ports concept is not used in the Hurd, and so the _SLOT
+macros are not defined in GNU Mach.
+
+The following section documents how @code{mach_ports_register} and
+@code{mach_ports_lookup} were originally intended to be used.
+
+@deftypefun kern_return_t mach_ports_register (@w{task_t @var{target_task}}, @w{port_array_t @var{init_port_set}}, @w{int @var{init_port_array_count}})
+@deftypefunx kern_return_t mach_ports_lookup (@w{task_t @var{target_task}}, @w{port_array_t *@var{init_port_set}}, @w{int *@var{init_port_array_count}})
+@code{mach_ports_register} manipulates the inherited ports array,
+@code{mach_ports_lookup} is used to acquire specific parent ports.
+@var{target_task} is the task to be affected. @var{init_port_set} is an
+array of system ports to be registered, or returned. Although the array
+size is given as variable, the kernel will only accept a limited number
+of ports. @var{init_port_array_count} is the number of ports returned
+in @var{init_port_set}.
+
+@code{mach_ports_register} registers an array of well-known system ports
+with the kernel on behalf of a specific task. Currently the ports to be
+registered are: the port to the Network Name Server, the port to the
+Environment Manager, and a port to the Service server. These port
+values must be placed in specific slots in the init_port_set. The slot
+numbers are given by the global constants defined in @file{mach_init.h}:
+@code{NAME_SERVER_SLOT}, @code{ENVIRONMENT_SLOT}, and
+@code{SERVICE_SLOT}. These ports may later be retrieved with
+@code{mach_ports_lookup}.
+
+When a new task is created (see @code{task_create}), the child task will
+be given access to these ports. Only port send rights may be
+registered. Furthermore, the number of ports which may be registered is
+fixed and given by the global constant @code{MACH_PORT_SLOTS_USED}.
+Attempts to register too many ports will fail.
+
+It is intended that this mechanism be used only for task initialization,
+and then only by runtime support modules. A parent task has three
+choices in passing these system ports to a child task. Most commonly it
+can do nothing and its child will inherit access to the same
+@var{init_port_set} that the parent has; or a parent task may register a
+set of ports it wishes to have passed to all of its children by calling
+@code{mach_ports_register} using its task port; or it may make necessary
+modifications to the set of ports it wishes its child to see, and then
+register those ports using the child's task port prior to starting the
+child's thread(s). The @code{mach_ports_lookup} call which is done by
+@code{mach_init} in the child task will acquire these initial ports for
+the child.
+
+Tasks other than the Network Name Server and the Environment Manager
+should not need access to the Service port. The Network Name Server port
+is the same for all tasks on a given machine. The Environment port is
+the only port likely to have different values for different tasks.
+
+Since the number of ports which may be registered is limited, ports
+other than those used by the runtime system to initialize a task should
+be passed to children either through an initial message, or through the
+Network Name Server for public ports, or the Environment Manager for
+private ports.
+
+The function returns @code{KERN_SUCCESS} if the memory was allocated,
+and @code{KERN_INVALID_ARGUMENT} if an attempt was made to register more
+ports than the current kernel implementation allows.
+@end deftypefun
@node Virtual Memory Interface
@@ -3169,14 +3241,15 @@ successfully set and @code{KERN_INVALID_ADDRESS} if an invalid or
non-allocated address was specified.
@end deftypefun
-@deftypefun kern_return_t vm_wire (@w{host_priv_t @var{host_priv}}, @w{vm_task_t @var{target_task}}, @w{vm_address_t @var{address}}, @w{vm_size_t @var{size}}, @w{vm_prot_t @var{access}})
-The function @code{vm_wire} allows privileged applications to control
-memory pageability. @var{host_priv} is the privileged host port for the
+@deftypefun kern_return_t vm_wire (@w{host_t @var{host}}, @w{vm_task_t @var{target_task}}, @w{vm_address_t @var{address}}, @w{vm_size_t @var{size}}, @w{vm_prot_t @var{access}})
+The function @code{vm_wire} allows applications to control
+memory pageability. @var{host} is the host port for the
host on which @var{target_task} resides. @var{address} is the starting
address, which will be rounded down to a page boundary. @var{size} is
the size in bytes of the region for which protection is to change, and
will be rounded up to give a page boundary. @var{access} specifies the
-types of accesses that must not cause page faults.
+types of accesses that must not cause page faults. If the host port is
+not privileged, the amount of memory is limited per task.
The semantics of a successful @code{vm_wire} operation are that memory
in the specified range will not cause page faults for any accesses
@@ -3185,7 +3258,7 @@ access argument of @code{VM_PROT_READ | VM_PROT_WRITE}. A special case
is that @code{VM_PROT_NONE} makes the memory pageable.
The function returns @code{KERN_SUCCESS} if the call succeeded,
-@code{KERN_INVALID_HOST} if @var{host_priv} was not the privileged host
+@code{KERN_INVALID_HOST} if @var{host} was not a valid host
port, @code{KERN_INVALID_TASK} if @var{task} was not a valid task,
@code{KERN_INVALID_VALUE} if @var{access} specified an invalid access
mode, @code{KERN_FAILURE} if some memory in the specified range is not
@@ -3193,7 +3266,7 @@ present or has an inappropriate protection value, and
@code{KERN_INVALID_ARGUMENT} if unwiring (@var{access} is
@code{VM_PROT_NONE}) and the memory is not already wired.
-The @code{vm_wire} call is actually an RPC to @var{host_priv}, normally
+The @code{vm_wire} call is actually an RPC to @var{host}, normally
a send right for a privileged host port, but potentially any send right.
In addition to the normal diagnostic return codes from the call's server
(normally the kernel), the call may return @code{mach_msg} return codes.
@@ -3294,7 +3367,7 @@ exception.
@var{target_task} is the task to be affected. The starting address is
@var{address}. If the @var{anywhere} option is used, this address is
-ignored. The address actually allocated will be returned in
+used as a starting hint. The address actually allocated will be returned in
@var{address}. @var{size} is the number of bytes to allocate (rounded by
the system in a machine dependent way). The alignment restriction is
specified by @var{mask}. Bits asserted in this mask must not be
@@ -3453,8 +3526,9 @@ each will perform a @code{memory_object_init} call with new request and
name ports. The virtual page size that is used by the calling kernel is
included for planning purposes.
-When the memory manager is prepared to accept requests for data for this
-object, it must call @code{memory_object_ready} with the attribute.
+When the memory manager is prepared to accept requests for data for
+this object, it must call @code{memory_object_ready}, or set the ready
+flag using @code{memory_object_set_attributes}.
Otherwise the kernel will not process requests on this object. To
reject all mappings of this object, the memory manager may use
@code{memory_object_destroy}.
@@ -4557,7 +4631,7 @@ their priority from their task and their max priority from the thread.
@deftypefun kern_return_t thread_priority (@w{thread_t @var{thread}}, @w{int @var{prority}}, @w{boolean_t @var{set_max}})
The function @code{thread_priority} changes the priority and optionally
-the maximum priority of @var{thread}. Priorities range from 0 to 31,
+the maximum priority of @var{thread}. Priorities range from 0 to 49,
where lower numbers denote higher priorities. If the new priority is
higher than the priority of the current thread, preemption may occur as
a result of this call. The maximum priority of the thread is also set
@@ -4568,7 +4642,7 @@ priority.
The functions returns @code{KERN_SUCCESS} if the operation completed
successfully, @code{KERN_INVALID_ARGUMENT} if @var{thread} is not a
-thread or @var{priority} is out of range (not in 0..31), and
+thread or @var{priority} is out of range (not in 0..49), and
@code{KERN_FAILURE} if the requested operation would violate the
thread's maximum priority (thread_priority).
@end deftypefun
@@ -4582,7 +4656,7 @@ legal value.
The functions returns @code{KERN_SUCCESS} if the operation completed
successfully, @code{KERN_INVALID_ARGUMENT} if @var{thread} is not a
thread or @var{processor_set} is not a control port for a processor set
-or @var{priority} is out of range (not in 0..31), and
+or @var{priority} is out of range (not in 0..49), and
@code{KERN_FAILURE} if the thread is not assigned to the processor set
whose control port was presented.
@end deftypefun
@@ -5037,6 +5111,17 @@ total system run time for live threads
This is a pointer to a @code{struct task_thread_times_info}.
@end deftp
+@deftypefun kern_return_t task_set_name (@w{task_t @var{target_task}}, @w{kernel_debug_name_t @var{name}})
+
+The function @code{task_set_name} sets the name of @var{target_task}
+to @var{name}, truncating it if necessary.
+
+This is a debugging aid. The name is used in diagnostic messages
+printed by the kernel.
+
+The function returns @code{KERN_SUCCESS} if the call succeeded.
+@end deftypefun
+
@node Task Execution
@subsection Task Execution
@@ -7018,8 +7103,9 @@ session. If the execution is resumed again, the numbers may change.
The current thread can be distinguished from others by a @code{#} after
the thread id instead of @code{:}. Without @code{l} option, it only
shows thread id, thread structure address and the status for each
-thread. The status consists of 5 letters, R(run), W(wait), S(suspended),
-O(swapped out) and N(interruptible), and if corresponding
+thread. The status consists of 6 letters, R(run), W(wait), S(suspended),
+O(swapped out), N(interruptible), and F(floating point arithmetic used,
+if supported by the platform). If the corresponding
status bit is off, @code{.} is printed instead. If @code{l} option is
specified, more detail information is printed for each thread.
diff --git a/i386/Makefrag.am b/i386/Makefrag.am
index 738c60ae..4e56543f 100644
--- a/i386/Makefrag.am
+++ b/i386/Makefrag.am
@@ -20,10 +20,13 @@
libkernel_a_SOURCES += \
i386/i386at/autoconf.c \
i386/i386at/autoconf.h \
+ i386/i386at/biosmem.c \
+ i386/i386at/biosmem.h \
i386/i386at/conf.c \
i386/i386at/cons_conf.c \
i386/i386at/idt.h \
i386/i386at/model_dep.c \
+ i386/i386at/model_dep.h \
i386/include/mach/sa/stdarg.h
if PLATFORM_at
@@ -50,9 +53,29 @@ libkernel_a_SOURCES += \
i386/i386at/kdasm.S \
i386/i386at/kdsoft.h \
i386/i386at/mem.c \
+ i386/i386at/mem.h \
i386/i386at/pic_isa.c \
i386/i386at/rtc.c \
i386/i386at/rtc.h
+
+libkernel_a_SOURCES += \
+ i386/i386at/acpihalt.c \
+ i386/i386at/acpihalt.h \
+ i386/i386at/acpi.c \
+ i386/i386at/grub_glue.c \
+ i386/grub/err.h \
+ i386/grub/cpu/io.h \
+ i386/grub/cpu/types.h \
+ i386/grub/cpu/time.h \
+ i386/grub/mm.h \
+ i386/grub/acpi.h \
+ i386/grub/symbol.h \
+ i386/grub/misc.h \
+ i386/grub/types.h \
+ i386/grub/time.h \
+ i386/grub/i18n.h \
+ i386/grub/compiler.h \
+ i386/grub/glue.h
endif
#
@@ -62,7 +85,7 @@ endif
if enable_lpr
libkernel_a_SOURCES += \
i386/i386at/lpr.c \
- i386/i386at/lprreg.h
+ i386/i386at/lpr.h
endif
@@ -74,6 +97,7 @@ libkernel_a_SOURCES += \
i386/i386/ast.h \
i386/i386/ast_check.c \
i386/i386/ast_types.h \
+ i386/i386/cpu.h \
i386/i386/cpu_number.h \
i386/i386/cswitch.S \
i386/i386/db_disasm.c \
@@ -81,6 +105,7 @@ libkernel_a_SOURCES += \
i386/i386/db_interface.h \
i386/i386/db_machdep.h \
i386/i386/db_trace.c \
+ i386/i386/db_trace.h \
i386/i386/debug.h \
i386/i386/debug_i386.c \
i386/i386/debug_trace.S \
@@ -134,7 +159,6 @@ libkernel_a_SOURCES += \
i386/i386/user_ldt.c \
i386/i386/user_ldt.h \
i386/i386/vm_param.h \
- i386/i386/vm_tuning.h \
i386/i386/xpr.h \
i386/intel/pmap.c \
i386/intel/pmap.h \
@@ -146,6 +170,7 @@ EXTRA_DIST += \
if PLATFORM_at
libkernel_a_SOURCES += \
i386/i386/hardclock.c \
+ i386/i386/hardclock.h \
i386/i386/io_map.c \
i386/i386/pic.c \
i386/i386/pic.h \
@@ -218,7 +243,6 @@ include_mach_i386dir = $(includedir)/mach/i386
include_mach_i386_HEADERS = \
i386/include/mach/i386/asm.h \
i386/include/mach/i386/boolean.h \
- i386/include/mach/i386/disk.h \
i386/include/mach/i386/eflags.h \
i386/include/mach/i386/exception.h \
i386/include/mach/i386/fp_reg.h \
@@ -228,7 +252,6 @@ include_mach_i386_HEADERS = \
i386/include/mach/i386/mach_i386_types.h \
i386/include/mach/i386/machine_types.defs \
i386/include/mach/i386/multiboot.h \
- i386/include/mach/i386/rpc.h \
i386/include/mach/i386/syscall_sw.h \
i386/include/mach/i386/thread_status.h \
i386/include/mach/i386/trap.h \
diff --git a/i386/configfrag.ac b/i386/configfrag.ac
index 1eaabcad..48744b12 100644
--- a/i386/configfrag.ac
+++ b/i386/configfrag.ac
@@ -73,6 +73,10 @@ AC_DEFINE_UNQUOTED([NLPR], [$nlpr], [NLPR])
# Options.
#
+# The immediate console, useful for debugging early system
+# initialization. Disabled by default.
+AC_DEFINE([ENABLE_IMMEDIATE_CONSOLE], [0], [ENABLE_IMMEDIATE_CONSOLE])
+
AC_ARG_ENABLE([lpr],
AS_HELP_STRING([--enable-lpr], [lpr device; on ix86-at enabled by default]))
[case $host_platform:$host_cpu in
diff --git a/i386/grub/acpi.h b/i386/grub/acpi.h
new file mode 100644
index 00000000..2ac2bd6f
--- /dev/null
+++ b/i386/grub/acpi.h
@@ -0,0 +1,220 @@
+/*
+ * GRUB -- GRand Unified Bootloader
+ * Copyright (C) 2009 Free Software Foundation, Inc.
+ *
+ * GRUB is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * GRUB is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GRUB. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef GRUB_ACPI_HEADER
+#define GRUB_ACPI_HEADER 1
+
+#ifndef GRUB_DSDT_TEST
+#include <grub/types.h>
+#include <grub/err.h>
+#endif
+
+#define GRUB_RSDP_SIGNATURE "RSD PTR "
+#define GRUB_RSDP_SIGNATURE_SIZE 8
+
+struct grub_acpi_rsdp_v10
+{
+ grub_uint8_t signature[GRUB_RSDP_SIGNATURE_SIZE];
+ grub_uint8_t checksum;
+ grub_uint8_t oemid[6];
+ grub_uint8_t revision;
+ grub_uint32_t rsdt_addr;
+} GRUB_PACKED;
+
+struct grub_acpi_rsdp_v20
+{
+ struct grub_acpi_rsdp_v10 rsdpv1;
+ grub_uint32_t length;
+ grub_uint64_t xsdt_addr;
+ grub_uint8_t checksum;
+ grub_uint8_t reserved[3];
+} GRUB_PACKED;
+
+struct grub_acpi_table_header
+{
+ grub_uint8_t signature[4];
+ grub_uint32_t length;
+ grub_uint8_t revision;
+ grub_uint8_t checksum;
+ grub_uint8_t oemid[6];
+ grub_uint8_t oemtable[8];
+ grub_uint32_t oemrev;
+ grub_uint8_t creator_id[4];
+ grub_uint32_t creator_rev;
+} GRUB_PACKED;
+
+#define GRUB_ACPI_FADT_SIGNATURE "FACP"
+
+struct grub_acpi_fadt
+{
+ struct grub_acpi_table_header hdr;
+ grub_uint32_t facs_addr;
+ grub_uint32_t dsdt_addr;
+ grub_uint8_t somefields1[20];
+ grub_uint32_t pm1a;
+ grub_uint8_t somefields2[64];
+ grub_uint64_t facs_xaddr;
+ grub_uint64_t dsdt_xaddr;
+ grub_uint8_t somefields3[96];
+} GRUB_PACKED;
+
+#define GRUB_ACPI_MADT_SIGNATURE "APIC"
+
+struct grub_acpi_madt_entry_header
+{
+ grub_uint8_t type;
+ grub_uint8_t len;
+};
+
+struct grub_acpi_madt
+{
+ struct grub_acpi_table_header hdr;
+ grub_uint32_t lapic_addr;
+ grub_uint32_t flags;
+ struct grub_acpi_madt_entry_header entries[0];
+};
+
+enum
+ {
+ GRUB_ACPI_MADT_ENTRY_TYPE_LAPIC = 0,
+ GRUB_ACPI_MADT_ENTRY_TYPE_IOAPIC = 1,
+ GRUB_ACPI_MADT_ENTRY_TYPE_INTERRUPT_OVERRIDE = 2,
+ GRUB_ACPI_MADT_ENTRY_TYPE_LAPIC_NMI = 4,
+ GRUB_ACPI_MADT_ENTRY_TYPE_SAPIC = 6,
+ GRUB_ACPI_MADT_ENTRY_TYPE_LSAPIC = 7,
+ GRUB_ACPI_MADT_ENTRY_TYPE_PLATFORM_INT_SOURCE = 8
+ };
+
+struct grub_acpi_madt_entry_lapic
+{
+ struct grub_acpi_madt_entry_header hdr;
+ grub_uint8_t acpiid;
+ grub_uint8_t apicid;
+ grub_uint32_t flags;
+};
+
+struct grub_acpi_madt_entry_ioapic
+{
+ struct grub_acpi_madt_entry_header hdr;
+ grub_uint8_t id;
+ grub_uint8_t pad;
+ grub_uint32_t address;
+ grub_uint32_t global_sys_interrupt;
+};
+
+struct grub_acpi_madt_entry_interrupt_override
+{
+ struct grub_acpi_madt_entry_header hdr;
+ grub_uint8_t bus;
+ grub_uint8_t source;
+ grub_uint32_t global_sys_interrupt;
+ grub_uint16_t flags;
+} GRUB_PACKED;
+
+
+struct grub_acpi_madt_entry_lapic_nmi
+{
+ struct grub_acpi_madt_entry_header hdr;
+ grub_uint8_t acpiid;
+ grub_uint16_t flags;
+ grub_uint8_t lint;
+} GRUB_PACKED;
+
+struct grub_acpi_madt_entry_sapic
+{
+ struct grub_acpi_madt_entry_header hdr;
+ grub_uint8_t id;
+ grub_uint8_t pad;
+ grub_uint32_t global_sys_interrupt_base;
+ grub_uint64_t addr;
+};
+
+struct grub_acpi_madt_entry_lsapic
+{
+ struct grub_acpi_madt_entry_header hdr;
+ grub_uint8_t cpu_id;
+ grub_uint8_t id;
+ grub_uint8_t eid;
+ grub_uint8_t pad[3];
+ grub_uint32_t flags;
+ grub_uint32_t cpu_uid;
+ grub_uint8_t cpu_uid_str[0];
+};
+
+struct grub_acpi_madt_entry_platform_int_source
+{
+ struct grub_acpi_madt_entry_header hdr;
+ grub_uint16_t flags;
+ grub_uint8_t inttype;
+ grub_uint8_t cpu_id;
+ grub_uint8_t cpu_eid;
+ grub_uint8_t sapic_vector;
+ grub_uint32_t global_sys_int;
+ grub_uint32_t src_flags;
+};
+
+enum
+ {
+ GRUB_ACPI_MADT_ENTRY_SAPIC_FLAGS_ENABLED = 1
+ };
+
+#ifndef GRUB_DSDT_TEST
+struct grub_acpi_rsdp_v10 *grub_acpi_get_rsdpv1 (void);
+struct grub_acpi_rsdp_v20 *grub_acpi_get_rsdpv2 (void);
+struct grub_acpi_rsdp_v10 *grub_machine_acpi_get_rsdpv1 (void);
+struct grub_acpi_rsdp_v20 *grub_machine_acpi_get_rsdpv2 (void);
+grub_uint8_t grub_byte_checksum (void *base, grub_size_t size);
+
+grub_err_t grub_acpi_create_ebda (void);
+
+void grub_acpi_halt (void);
+#endif
+
+#define GRUB_ACPI_SLP_EN (1 << 13)
+#define GRUB_ACPI_SLP_TYP_OFFSET 10
+
+enum
+ {
+ GRUB_ACPI_OPCODE_ZERO = 0, GRUB_ACPI_OPCODE_ONE = 1,
+ GRUB_ACPI_OPCODE_NAME = 8, GRUB_ACPI_OPCODE_BYTE_CONST = 0x0a,
+ GRUB_ACPI_OPCODE_WORD_CONST = 0x0b,
+ GRUB_ACPI_OPCODE_DWORD_CONST = 0x0c,
+ GRUB_ACPI_OPCODE_STRING_CONST = 0x0d,
+ GRUB_ACPI_OPCODE_SCOPE = 0x10,
+ GRUB_ACPI_OPCODE_BUFFER = 0x11,
+ GRUB_ACPI_OPCODE_PACKAGE = 0x12,
+ GRUB_ACPI_OPCODE_METHOD = 0x14, GRUB_ACPI_OPCODE_EXTOP = 0x5b,
+ GRUB_ACPI_OPCODE_CREATE_WORD_FIELD = 0x8b,
+ GRUB_ACPI_OPCODE_CREATE_BYTE_FIELD = 0x8c,
+ GRUB_ACPI_OPCODE_IF = 0xa0, GRUB_ACPI_OPCODE_ONES = 0xff
+ };
+enum
+ {
+ GRUB_ACPI_EXTOPCODE_MUTEX = 0x01,
+ GRUB_ACPI_EXTOPCODE_EVENT_OP = 0x02,
+ GRUB_ACPI_EXTOPCODE_OPERATION_REGION = 0x80,
+ GRUB_ACPI_EXTOPCODE_FIELD_OP = 0x81,
+ GRUB_ACPI_EXTOPCODE_DEVICE_OP = 0x82,
+ GRUB_ACPI_EXTOPCODE_PROCESSOR_OP = 0x83,
+ GRUB_ACPI_EXTOPCODE_POWER_RES_OP = 0x84,
+ GRUB_ACPI_EXTOPCODE_THERMAL_ZONE_OP = 0x85,
+ GRUB_ACPI_EXTOPCODE_INDEX_FIELD_OP = 0x86,
+ GRUB_ACPI_EXTOPCODE_BANK_FIELD_OP = 0x87,
+ };
+
+#endif /* ! GRUB_ACPI_HEADER */
diff --git a/i386/grub/compiler.h b/i386/grub/compiler.h
new file mode 100644
index 00000000..c9e1d7a7
--- /dev/null
+++ b/i386/grub/compiler.h
@@ -0,0 +1,51 @@
+/* compiler.h - macros for various compiler features */
+/*
+ * GRUB -- GRand Unified Bootloader
+ * Copyright (C) 2002,2003,2005,2006,2007,2008,2009,2010,2014 Free Software Foundation, Inc.
+ *
+ * GRUB is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * GRUB is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GRUB. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef GRUB_COMPILER_HEADER
+#define GRUB_COMPILER_HEADER 1
+
+/* GCC version checking borrowed from glibc. */
+#if defined(__GNUC__) && defined(__GNUC_MINOR__)
+# define GNUC_PREREQ(maj,min) \
+ ((__GNUC__ << 16) + __GNUC_MINOR__ >= ((maj) << 16) + (min))
+#else
+# define GNUC_PREREQ(maj,min) 0
+#endif
+
+/* Does this compiler support compile-time error attributes? */
+#if GNUC_PREREQ(4,3)
+# define ATTRIBUTE_ERROR(msg) \
+ __attribute__ ((__error__ (msg)))
+#else
+# define ATTRIBUTE_ERROR(msg) __attribute__ ((noreturn))
+#endif
+
+#if GNUC_PREREQ(4,4)
+# define GNU_PRINTF gnu_printf
+#else
+# define GNU_PRINTF printf
+#endif
+
+#if GNUC_PREREQ(3,4)
+# define WARN_UNUSED_RESULT __attribute__ ((warn_unused_result))
+#else
+# define WARN_UNUSED_RESULT
+#endif
+
+#endif /* ! GRUB_COMPILER_HEADER */
diff --git a/i386/grub/cpu/io.h b/i386/grub/cpu/io.h
new file mode 100644
index 00000000..ae12a3e3
--- /dev/null
+++ b/i386/grub/cpu/io.h
@@ -0,0 +1,72 @@
+/*
+ * GRUB -- GRand Unified Bootloader
+ * Copyright (C) 1996,2000,2002,2007 Free Software Foundation, Inc.
+ *
+ * GRUB is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * GRUB is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GRUB. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* Based on sys/io.h from GNU libc. */
+
+#ifndef GRUB_IO_H
+#define GRUB_IO_H 1
+
+typedef unsigned short int grub_port_t;
+
+static __inline unsigned char
+grub_inb (unsigned short int port)
+{
+ unsigned char _v;
+
+ __asm__ __volatile__ ("inb %w1,%0":"=a" (_v):"Nd" (port));
+ return _v;
+}
+
+static __inline unsigned short int
+grub_inw (unsigned short int port)
+{
+ unsigned short _v;
+
+ __asm__ __volatile__ ("inw %w1,%0":"=a" (_v):"Nd" (port));
+ return _v;
+}
+
+static __inline unsigned int
+grub_inl (unsigned short int port)
+{
+ unsigned int _v;
+
+ __asm__ __volatile__ ("inl %w1,%0":"=a" (_v):"Nd" (port));
+ return _v;
+}
+
+static __inline void
+grub_outb (unsigned char value, unsigned short int port)
+{
+ __asm__ __volatile__ ("outb %b0,%w1": :"a" (value), "Nd" (port));
+}
+
+static __inline void
+grub_outw (unsigned short int value, unsigned short int port)
+{
+ __asm__ __volatile__ ("outw %w0,%w1": :"a" (value), "Nd" (port));
+
+}
+
+static __inline void
+grub_outl (unsigned int value, unsigned short int port)
+{
+ __asm__ __volatile__ ("outl %0,%w1": :"a" (value), "Nd" (port));
+}
+
+#endif /* ! GRUB_IO_H */
diff --git a/i386/grub/cpu/time.h b/i386/grub/cpu/time.h
new file mode 100644
index 00000000..842882cf
--- /dev/null
+++ b/i386/grub/cpu/time.h
@@ -0,0 +1,29 @@
+/*
+ * GRUB -- GRand Unified Bootloader
+ * Copyright (C) 2007 Free Software Foundation, Inc.
+ *
+ * GRUB is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * GRUB is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GRUB. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef KERNEL_CPU_TIME_HEADER
+#define KERNEL_CPU_TIME_HEADER 1
+
+static __inline void
+grub_cpu_idle (void)
+{
+ /* FIXME: this can't work until we handle interrupts. */
+/* __asm__ __volatile__ ("hlt"); */
+}
+
+#endif /* ! KERNEL_CPU_TIME_HEADER */
diff --git a/i386/grub/cpu/types.h b/i386/grub/cpu/types.h
new file mode 100644
index 00000000..c20063f3
--- /dev/null
+++ b/i386/grub/cpu/types.h
@@ -0,0 +1,33 @@
+/*
+ * GRUB -- GRand Unified Bootloader
+ * Copyright (C) 2002,2006,2007 Free Software Foundation, Inc.
+ *
+ * GRUB is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * GRUB is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GRUB. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef GRUB_TYPES_CPU_HEADER
+#define GRUB_TYPES_CPU_HEADER 1
+
+/* The size of void *. */
+#define GRUB_TARGET_SIZEOF_VOID_P 4
+
+/* The size of long. */
+#define GRUB_TARGET_SIZEOF_LONG 4
+
+/* i386 is little-endian. */
+#undef GRUB_TARGET_WORDS_BIGENDIAN
+
+#define GRUB_HAVE_UNALIGNED_ACCESS 1
+
+#endif /* ! GRUB_TYPES_CPU_HEADER */
diff --git a/i386/grub/err.h b/i386/grub/err.h
new file mode 100644
index 00000000..1590c688
--- /dev/null
+++ b/i386/grub/err.h
@@ -0,0 +1,96 @@
+/* err.h - error numbers and prototypes */
+/*
+ * GRUB -- GRand Unified Bootloader
+ * Copyright (C) 2002,2005,2007,2008 Free Software Foundation, Inc.
+ *
+ * GRUB is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * GRUB is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GRUB. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef GRUB_ERR_HEADER
+#define GRUB_ERR_HEADER 1
+
+#include <grub/symbol.h>
+
+#define GRUB_MAX_ERRMSG 256
+
+typedef enum
+ {
+ GRUB_ERR_NONE = 0,
+ GRUB_ERR_TEST_FAILURE,
+ GRUB_ERR_BAD_MODULE,
+ GRUB_ERR_OUT_OF_MEMORY,
+ GRUB_ERR_BAD_FILE_TYPE,
+ GRUB_ERR_FILE_NOT_FOUND,
+ GRUB_ERR_FILE_READ_ERROR,
+ GRUB_ERR_BAD_FILENAME,
+ GRUB_ERR_UNKNOWN_FS,
+ GRUB_ERR_BAD_FS,
+ GRUB_ERR_BAD_NUMBER,
+ GRUB_ERR_OUT_OF_RANGE,
+ GRUB_ERR_UNKNOWN_DEVICE,
+ GRUB_ERR_BAD_DEVICE,
+ GRUB_ERR_READ_ERROR,
+ GRUB_ERR_WRITE_ERROR,
+ GRUB_ERR_UNKNOWN_COMMAND,
+ GRUB_ERR_INVALID_COMMAND,
+ GRUB_ERR_BAD_ARGUMENT,
+ GRUB_ERR_BAD_PART_TABLE,
+ GRUB_ERR_UNKNOWN_OS,
+ GRUB_ERR_BAD_OS,
+ GRUB_ERR_NO_KERNEL,
+ GRUB_ERR_BAD_FONT,
+ GRUB_ERR_NOT_IMPLEMENTED_YET,
+ GRUB_ERR_SYMLINK_LOOP,
+ GRUB_ERR_BAD_COMPRESSED_DATA,
+ GRUB_ERR_MENU,
+ GRUB_ERR_TIMEOUT,
+ GRUB_ERR_IO,
+ GRUB_ERR_ACCESS_DENIED,
+ GRUB_ERR_EXTRACTOR,
+ GRUB_ERR_NET_BAD_ADDRESS,
+ GRUB_ERR_NET_ROUTE_LOOP,
+ GRUB_ERR_NET_NO_ROUTE,
+ GRUB_ERR_NET_NO_ANSWER,
+ GRUB_ERR_NET_NO_CARD,
+ GRUB_ERR_WAIT,
+ GRUB_ERR_BUG,
+ GRUB_ERR_NET_PORT_CLOSED,
+ GRUB_ERR_NET_INVALID_RESPONSE,
+ GRUB_ERR_NET_UNKNOWN_ERROR,
+ GRUB_ERR_NET_PACKET_TOO_BIG,
+ GRUB_ERR_NET_NO_DOMAIN,
+ GRUB_ERR_EOF,
+ GRUB_ERR_BAD_SIGNATURE
+ }
+grub_err_t;
+
+struct grub_error_saved
+{
+ grub_err_t grub_errno;
+ char errmsg[GRUB_MAX_ERRMSG];
+};
+
+extern grub_err_t EXPORT_VAR(grub_errno);
+extern char EXPORT_VAR(grub_errmsg)[GRUB_MAX_ERRMSG];
+
+grub_err_t EXPORT_FUNC(grub_error) (grub_err_t n, const char *fmt, ...);
+void EXPORT_FUNC(grub_fatal) (const char *fmt, ...) __attribute__ ((noreturn));
+void EXPORT_FUNC(grub_error_push) (void);
+int EXPORT_FUNC(grub_error_pop) (void);
+void EXPORT_FUNC(grub_print_error) (void);
+extern int EXPORT_VAR(grub_err_printed_errors);
+int grub_err_printf (const char *fmt, ...)
+ __attribute__ ((format (__printf__, 1, 2)));
+
+#endif /* ! GRUB_ERR_HEADER */
diff --git a/i386/grub/glue.h b/i386/grub/glue.h
new file mode 100644
index 00000000..ae41014d
--- /dev/null
+++ b/i386/grub/glue.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2014 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _GRUB_GLUE_H
+#define _GRUB_GLUE_H
+
+#define GRUB_FILE __FILE__
+#define grub_memcmp memcmp
+#define grub_printf printf
+#define grub_puts_ puts
+
+#include <mach/mach_types.h>
+#include <i386/vm_param.h>
+
+/* Warning: this leaks memory maps for now, do not use it yet for something
+ * else than Mach shutdown. */
+vm_offset_t io_map_cached(vm_offset_t phys_addr, vm_size_t size);
+
+#endif /* _GRUB_GLUE_H */
diff --git a/i386/grub/i18n.h b/i386/grub/i18n.h
new file mode 100644
index 00000000..8b533571
--- /dev/null
+++ b/i386/grub/i18n.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2014 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _GRUB_I18N_H
+#define _GRUB_I18N_H
+
+/* No i18n please. */
+#define _(x) x
+#define N_(x) x
+
+#endif /* _GRUB_I18N_H */
diff --git a/i386/grub/misc.h b/i386/grub/misc.h
new file mode 100644
index 00000000..b71140a5
--- /dev/null
+++ b/i386/grub/misc.h
@@ -0,0 +1,517 @@
+/* misc.h - prototypes for misc functions */
+/*
+ * GRUB -- GRand Unified Bootloader
+ * Copyright (C) 2002,2003,2005,2006,2007,2008,2009,2010 Free Software Foundation, Inc.
+ *
+ * GRUB is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * GRUB is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GRUB. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef GRUB_MISC_HEADER
+#define GRUB_MISC_HEADER 1
+
+#include <stdarg.h>
+#include <kern/macros.h>
+#include <grub/types.h>
+#include <grub/symbol.h>
+#include <grub/err.h>
+#include <grub/i18n.h>
+#include <grub/compiler.h>
+
+#define ALIGN_UP(addr, align) \
+ ((addr + (typeof (addr)) align - 1) & ~((typeof (addr)) align - 1))
+#define ALIGN_UP_OVERHEAD(addr, align) ((-(addr)) & ((typeof (addr)) (align) - 1))
+#define ALIGN_DOWN(addr, align) \
+ ((addr) & ~((typeof (addr)) align - 1))
+#define COMPILE_TIME_ASSERT(cond) switch (0) { case 1: case !(cond): ; }
+
+#define grub_dprintf(condition, ...) grub_real_dprintf(GRUB_FILE, __LINE__, condition, __VA_ARGS__)
+
+void *EXPORT_FUNC(grub_memmove) (void *dest, const void *src, grub_size_t n);
+char *EXPORT_FUNC(grub_strcpy) (char *dest, const char *src);
+
+static inline char *
+grub_strncpy (char *dest, const char *src, int c)
+{
+ char *p = dest;
+
+ while ((*p++ = *src++) != '\0' && --c)
+ ;
+
+ return dest;
+}
+
+static inline char *
+grub_stpcpy (char *dest, const char *src)
+{
+ char *d = dest;
+ const char *s = src;
+
+ do
+ *d++ = *s;
+ while (*s++ != '\0');
+
+ return d - 1;
+}
+
+/* XXX: If grub_memmove is too slow, we must implement grub_memcpy. */
+static inline void *
+grub_memcpy (void *dest, const void *src, grub_size_t n)
+{
+ return grub_memmove (dest, src, n);
+}
+
+#if defined (__APPLE__) && defined(__i386__) && !defined (GRUB_UTIL)
+#define GRUB_BUILTIN_ATTR __attribute__ ((regparm(0)))
+#else
+#define GRUB_BUILTIN_ATTR
+#endif
+
+#if defined(__x86_64__) && !defined (GRUB_UTIL)
+#if defined (__MINGW32__) || defined (__CYGWIN__) || defined (__MINGW64__)
+#define GRUB_ASM_ATTR __attribute__ ((sysv_abi))
+#else
+#define GRUB_ASM_ATTR
+#endif
+#endif
+
+/* Prototypes for aliases. */
+#ifndef GRUB_UTIL
+int GRUB_BUILTIN_ATTR EXPORT_FUNC(memcmp) (const void *s1, const void *s2, grub_size_t n);
+void *GRUB_BUILTIN_ATTR EXPORT_FUNC(memmove) (void *dest, const void *src, grub_size_t n);
+void *GRUB_BUILTIN_ATTR EXPORT_FUNC(memcpy) (void *dest, const void *src, grub_size_t n);
+void *GRUB_BUILTIN_ATTR EXPORT_FUNC(memset) (void *s, int c, grub_size_t n);
+
+#ifdef __APPLE__
+void GRUB_BUILTIN_ATTR EXPORT_FUNC (__bzero) (void *s, grub_size_t n);
+#endif
+
+#endif
+
+int EXPORT_FUNC(grub_memcmp) (const void *s1, const void *s2, grub_size_t n);
+int EXPORT_FUNC(grub_strcmp) (const char *s1, const char *s2);
+int EXPORT_FUNC(grub_strncmp) (const char *s1, const char *s2, grub_size_t n);
+
+char *EXPORT_FUNC(grub_strchr) (const char *s, int c);
+char *EXPORT_FUNC(grub_strrchr) (const char *s, int c);
+int EXPORT_FUNC(grub_strword) (const char *s, const char *w);
+
+/* Copied from gnulib.
+ Written by Bruno Haible <bruno@clisp.org>, 2005. */
+static inline char *
+grub_strstr (const char *haystack, const char *needle)
+{
+ /* Be careful not to look at the entire extent of haystack or needle
+ until needed. This is useful because of these two cases:
+ - haystack may be very long, and a match of needle found early,
+ - needle may be very long, and not even a short initial segment of
+ needle may be found in haystack. */
+ if (*needle != '\0')
+ {
+ /* Speed up the following searches of needle by caching its first
+ character. */
+ char b = *needle++;
+
+ for (;; haystack++)
+ {
+ if (*haystack == '\0')
+ /* No match. */
+ return 0;
+ if (*haystack == b)
+ /* The first character matches. */
+ {
+ const char *rhaystack = haystack + 1;
+ const char *rneedle = needle;
+
+ for (;; rhaystack++, rneedle++)
+ {
+ if (*rneedle == '\0')
+ /* Found a match. */
+ return (char *) haystack;
+ if (*rhaystack == '\0')
+ /* No match. */
+ return 0;
+ if (*rhaystack != *rneedle)
+ /* Nothing in this round. */
+ break;
+ }
+ }
+ }
+ }
+ else
+ return (char *) haystack;
+}
+
+int EXPORT_FUNC(grub_isspace) (int c);
+
+static inline int
+grub_isprint (int c)
+{
+ return (c >= ' ' && c <= '~');
+}
+
+static inline int
+grub_iscntrl (int c)
+{
+ return (c >= 0x00 && c <= 0x1F) || c == 0x7F;
+}
+
+static inline int
+grub_isalpha (int c)
+{
+ return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z');
+}
+
+static inline int
+grub_islower (int c)
+{
+ return (c >= 'a' && c <= 'z');
+}
+
+static inline int
+grub_isupper (int c)
+{
+ return (c >= 'A' && c <= 'Z');
+}
+
+static inline int
+grub_isgraph (int c)
+{
+ return (c >= '!' && c <= '~');
+}
+
+static inline int
+grub_isdigit (int c)
+{
+ return (c >= '0' && c <= '9');
+}
+
+static inline int
+grub_isxdigit (int c)
+{
+ return (c >= '0' && c <= '9') || (c >= 'a' && c <= 'f') || (c >= 'A' && c <= 'F');
+}
+
+static inline int
+grub_isalnum (int c)
+{
+ return grub_isalpha (c) || grub_isdigit (c);
+}
+
+static inline int
+grub_tolower (int c)
+{
+ if (c >= 'A' && c <= 'Z')
+ return c - 'A' + 'a';
+
+ return c;
+}
+
+static inline int
+grub_toupper (int c)
+{
+ if (c >= 'a' && c <= 'z')
+ return c - 'a' + 'A';
+
+ return c;
+}
+
+static inline int
+grub_strcasecmp (const char *s1, const char *s2)
+{
+ while (*s1 && *s2)
+ {
+ if (grub_tolower ((grub_uint8_t) *s1)
+ != grub_tolower ((grub_uint8_t) *s2))
+ break;
+
+ s1++;
+ s2++;
+ }
+
+ return (int) grub_tolower ((grub_uint8_t) *s1)
+ - (int) grub_tolower ((grub_uint8_t) *s2);
+}
+
+static inline int
+grub_strncasecmp (const char *s1, const char *s2, grub_size_t n)
+{
+ if (n == 0)
+ return 0;
+
+ while (*s1 && *s2 && --n)
+ {
+ if (grub_tolower (*s1) != grub_tolower (*s2))
+ break;
+
+ s1++;
+ s2++;
+ }
+
+ return (int) grub_tolower ((grub_uint8_t) *s1)
+ - (int) grub_tolower ((grub_uint8_t) *s2);
+}
+
+unsigned long EXPORT_FUNC(grub_strtoul) (const char *str, char **end, int base);
+unsigned long long EXPORT_FUNC(grub_strtoull) (const char *str, char **end, int base);
+
+static inline long
+grub_strtol (const char *str, char **end, int base)
+{
+ int negative = 0;
+ unsigned long long magnitude;
+
+ while (*str && grub_isspace (*str))
+ str++;
+
+ if (*str == '-')
+ {
+ negative = 1;
+ str++;
+ }
+
+ magnitude = grub_strtoull (str, end, base);
+ if (negative)
+ {
+ if (magnitude > (unsigned long) GRUB_LONG_MAX + 1)
+ {
+ grub_error (GRUB_ERR_OUT_OF_RANGE, N_("overflow is detected"));
+ return GRUB_LONG_MIN;
+ }
+ return -((long) magnitude);
+ }
+ else
+ {
+ if (magnitude > GRUB_LONG_MAX)
+ {
+ grub_error (GRUB_ERR_OUT_OF_RANGE, N_("overflow is detected"));
+ return GRUB_LONG_MAX;
+ }
+ return (long) magnitude;
+ }
+}
+
+char *EXPORT_FUNC(grub_strdup) (const char *s) WARN_UNUSED_RESULT;
+char *EXPORT_FUNC(grub_strndup) (const char *s, grub_size_t n) WARN_UNUSED_RESULT;
+void *EXPORT_FUNC(grub_memset) (void *s, int c, grub_size_t n);
+grub_size_t EXPORT_FUNC(grub_strlen) (const char *s) WARN_UNUSED_RESULT;
+int EXPORT_FUNC(grub_printf) (const char *fmt, ...) __attribute__ ((format (GNU_PRINTF, 1, 2)));
+int EXPORT_FUNC(grub_printf_) (const char *fmt, ...) __attribute__ ((format (GNU_PRINTF, 1, 2)));
+
+/* Replace all `ch' characters of `input' with `with' and copy the
+ result into `output'; return EOS address of `output'. */
+static inline char *
+grub_strchrsub (char *output, const char *input, char ch, const char *with)
+{
+ while (*input)
+ {
+ if (*input == ch)
+ {
+ grub_strcpy (output, with);
+ output += grub_strlen (with);
+ input++;
+ continue;
+ }
+ *output++ = *input++;
+ }
+ *output = '\0';
+ return output;
+}
+
+extern void (*EXPORT_VAR (grub_xputs)) (const char *str);
+
+static inline int
+grub_puts (const char *s)
+{
+ const char nl[2] = "\n";
+ grub_xputs (s);
+ grub_xputs (nl);
+
+ return 1; /* Cannot fail. */
+}
+
+int EXPORT_FUNC(grub_puts_) (const char *s);
+void EXPORT_FUNC(grub_real_dprintf) (const char *file,
+ const int line,
+ const char *condition,
+ const char *fmt, ...) __attribute__ ((format (GNU_PRINTF, 4, 5)));
+int EXPORT_FUNC(grub_vprintf) (const char *fmt, va_list args);
+int EXPORT_FUNC(grub_snprintf) (char *str, grub_size_t n, const char *fmt, ...)
+ __attribute__ ((format (GNU_PRINTF, 3, 4)));
+int EXPORT_FUNC(grub_vsnprintf) (char *str, grub_size_t n, const char *fmt,
+ va_list args);
+char *EXPORT_FUNC(grub_xasprintf) (const char *fmt, ...)
+ __attribute__ ((format (GNU_PRINTF, 1, 2))) WARN_UNUSED_RESULT;
+char *EXPORT_FUNC(grub_xvasprintf) (const char *fmt, va_list args) WARN_UNUSED_RESULT;
+void EXPORT_FUNC(grub_exit) (void) __attribute__ ((noreturn));
+grub_uint64_t EXPORT_FUNC(grub_divmod64) (grub_uint64_t n,
+ grub_uint64_t d,
+ grub_uint64_t *r);
+
+#if (defined (__MINGW32__) || defined (__CYGWIN__)) && !defined(GRUB_UTIL)
+void EXPORT_FUNC (__register_frame_info) (void);
+void EXPORT_FUNC (__deregister_frame_info) (void);
+void EXPORT_FUNC (___chkstk_ms) (void);
+void EXPORT_FUNC (__chkstk_ms) (void);
+#endif
+
+/* Inline functions. */
+
+static inline char *
+grub_memchr (const void *p, int c, grub_size_t len)
+{
+ const char *s = (const char *) p;
+ const char *e = s + len;
+
+ for (; s < e; s++)
+ if (*s == c)
+ return (char *) s;
+
+ return 0;
+}
+
+
+static inline unsigned int
+grub_abs (int x)
+{
+ if (x < 0)
+ return (unsigned int) (-x);
+ else
+ return (unsigned int) x;
+}
+
+/* Rounded-up division */
+static inline unsigned int
+grub_div_roundup (unsigned int x, unsigned int y)
+{
+ return (x + y - 1) / y;
+}
+
+/* Reboot the machine. */
+#if defined (GRUB_MACHINE_EMU) || defined (GRUB_MACHINE_QEMU_MIPS)
+void EXPORT_FUNC(grub_reboot) (void) __attribute__ ((noreturn));
+#else
+void grub_reboot (void) __attribute__ ((noreturn));
+#endif
+
+#if defined (__clang__) && !defined (GRUB_UTIL)
+void __attribute__ ((noreturn)) EXPORT_FUNC (abort) (void);
+#endif
+
+#ifdef GRUB_MACHINE_PCBIOS
+/* Halt the system, using APM if possible. If NO_APM is true, don't
+ * use APM even if it is available. */
+void grub_halt (int no_apm) __attribute__ ((noreturn));
+#elif defined (__mips__) && !defined (GRUB_MACHINE_EMU)
+void EXPORT_FUNC (grub_halt) (void) __attribute__ ((noreturn));
+#else
+void grub_halt (void) __attribute__ ((noreturn));
+#endif
+
+#ifdef GRUB_MACHINE_EMU
+/* Flag to check if module loading is available. */
+extern const int EXPORT_VAR(grub_no_modules);
+#else
+#define grub_no_modules 0
+#endif
+
+static inline void
+grub_error_save (struct grub_error_saved *save)
+{
+ grub_memcpy (save->errmsg, grub_errmsg, sizeof (save->errmsg));
+ save->grub_errno = grub_errno;
+ grub_errno = GRUB_ERR_NONE;
+}
+
+static inline void
+grub_error_load (const struct grub_error_saved *save)
+{
+ grub_memcpy (grub_errmsg, save->errmsg, sizeof (grub_errmsg));
+ grub_errno = save->grub_errno;
+}
+
+#ifndef GRUB_UTIL
+
+#if defined (__arm__)
+
+grub_uint32_t
+EXPORT_FUNC (__udivsi3) (grub_uint32_t a, grub_uint32_t b);
+
+grub_uint32_t
+EXPORT_FUNC (__umodsi3) (grub_uint32_t a, grub_uint32_t b);
+
+#endif
+
+#if defined (__sparc__) || defined (__powerpc__)
+unsigned
+EXPORT_FUNC (__ctzdi2) (grub_uint64_t x);
+#define NEED_CTZDI2 1
+#endif
+
+#if defined (__mips__) || defined (__arm__)
+unsigned
+EXPORT_FUNC (__ctzsi2) (grub_uint32_t x);
+#define NEED_CTZSI2 1
+#endif
+
+#ifdef __arm__
+grub_uint32_t
+EXPORT_FUNC (__aeabi_uidiv) (grub_uint32_t a, grub_uint32_t b);
+grub_uint32_t
+EXPORT_FUNC (__aeabi_uidivmod) (grub_uint32_t a, grub_uint32_t b);
+
+/* Needed for allowing modules to be compiled as thumb. */
+grub_uint64_t
+EXPORT_FUNC (__muldi3) (grub_uint64_t a, grub_uint64_t b);
+grub_uint64_t
+EXPORT_FUNC (__aeabi_lmul) (grub_uint64_t a, grub_uint64_t b);
+
+#endif
+
+#if defined (__ia64__)
+
+grub_uint64_t
+EXPORT_FUNC (__udivdi3) (grub_uint64_t a, grub_uint64_t b);
+
+grub_uint64_t
+EXPORT_FUNC (__umoddi3) (grub_uint64_t a, grub_uint64_t b);
+
+#endif
+
+#endif /* GRUB_UTIL */
+
+
+#if BOOT_TIME_STATS
+struct grub_boot_time
+{
+ struct grub_boot_time *next;
+ grub_uint64_t tp;
+ const char *file;
+ int line;
+ char *msg;
+};
+
+extern struct grub_boot_time *EXPORT_VAR(grub_boot_time_head);
+
+void EXPORT_FUNC(grub_real_boot_time) (const char *file,
+ const int line,
+ const char *fmt, ...) __attribute__ ((format (GNU_PRINTF, 3, 4)));
+#define grub_boot_time(...) grub_real_boot_time(GRUB_FILE, __LINE__, __VA_ARGS__)
+#else
+#define grub_boot_time(...)
+#endif
+
+#define grub_max(a, b) (((a) > (b)) ? (a) : (b))
+#define grub_min(a, b) (((a) < (b)) ? (a) : (b))
+
+#endif /* ! GRUB_MISC_HEADER */
diff --git a/i386/grub/mm.h b/i386/grub/mm.h
new file mode 100644
index 00000000..28e2e53e
--- /dev/null
+++ b/i386/grub/mm.h
@@ -0,0 +1,77 @@
+/* mm.h - prototypes and declarations for memory manager */
+/*
+ * GRUB -- GRand Unified Bootloader
+ * Copyright (C) 2002,2007 Free Software Foundation, Inc.
+ *
+ * GRUB is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * GRUB is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GRUB. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef GRUB_MM_H
+#define GRUB_MM_H 1
+
+#include <grub/types.h>
+#include <grub/symbol.h>
+#include <config.h>
+
+#ifndef NULL
+# define NULL ((void *) 0)
+#endif
+
+void grub_mm_init_region (void *addr, grub_size_t size);
+void *EXPORT_FUNC(grub_malloc) (grub_size_t size);
+void *EXPORT_FUNC(grub_zalloc) (grub_size_t size);
+void EXPORT_FUNC(grub_free) (void *ptr);
+void *EXPORT_FUNC(grub_realloc) (void *ptr, grub_size_t size);
+#ifndef GRUB_MACHINE_EMU
+void *EXPORT_FUNC(grub_memalign) (grub_size_t align, grub_size_t size);
+#endif
+
+void grub_mm_check_real (const char *file, int line);
+#define grub_mm_check() grub_mm_check_real (GRUB_FILE, __LINE__);
+
+/* For debugging. */
+#if defined(MM_DEBUG) && !defined(GRUB_UTIL) && !defined (GRUB_MACHINE_EMU)
+/* Set this variable to 1 when you want to trace all memory function calls. */
+extern int EXPORT_VAR(grub_mm_debug);
+
+void grub_mm_dump_free (void);
+void grub_mm_dump (unsigned lineno);
+
+#define grub_malloc(size) \
+ grub_debug_malloc (GRUB_FILE, __LINE__, size)
+
+#define grub_zalloc(size) \
+ grub_debug_zalloc (GRUB_FILE, __LINE__, size)
+
+#define grub_realloc(ptr,size) \
+ grub_debug_realloc (GRUB_FILE, __LINE__, ptr, size)
+
+#define grub_memalign(align,size) \
+ grub_debug_memalign (GRUB_FILE, __LINE__, align, size)
+
+#define grub_free(ptr) \
+ grub_debug_free (GRUB_FILE, __LINE__, ptr)
+
+void *EXPORT_FUNC(grub_debug_malloc) (const char *file, int line,
+ grub_size_t size);
+void *EXPORT_FUNC(grub_debug_zalloc) (const char *file, int line,
+ grub_size_t size);
+void EXPORT_FUNC(grub_debug_free) (const char *file, int line, void *ptr);
+void *EXPORT_FUNC(grub_debug_realloc) (const char *file, int line, void *ptr,
+ grub_size_t size);
+void *EXPORT_FUNC(grub_debug_memalign) (const char *file, int line,
+ grub_size_t align, grub_size_t size);
+#endif /* MM_DEBUG && ! GRUB_UTIL */
+
+#endif /* ! GRUB_MM_H */
diff --git a/i386/grub/symbol.h b/i386/grub/symbol.h
new file mode 100644
index 00000000..ed19f70d
--- /dev/null
+++ b/i386/grub/symbol.h
@@ -0,0 +1,72 @@
+/*
+ * GRUB -- GRand Unified Bootloader
+ * Copyright (C) 1999,2000,2001,2002,2006,2007,2008,2009 Free Software Foundation, Inc.
+ *
+ * GRUB is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * GRUB is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GRUB. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef GRUB_SYMBOL_HEADER
+#define GRUB_SYMBOL_HEADER 1
+
+#include <config.h>
+
+/* Apple assembler requires local labels to start with a capital L */
+#define LOCAL(sym) L_ ## sym
+
+/* Add an underscore to a C symbol in assembler code if needed. */
+#ifndef GRUB_UTIL
+
+#ifdef __APPLE__
+#define MACRO_DOLLAR(x) $$ ## x
+#else
+#define MACRO_DOLLAR(x) $ ## x
+#endif
+
+#if HAVE_ASM_USCORE
+#ifdef ASM_FILE
+# define EXT_C(sym) _ ## sym
+#else
+# define EXT_C(sym) "_" sym
+#endif
+#else
+# define EXT_C(sym) sym
+#endif
+
+#ifdef __arm__
+#define END .end
+#endif
+
+#if defined (__APPLE__)
+#define FUNCTION(x) .globl EXT_C(x) ; EXT_C(x):
+#define VARIABLE(x) .globl EXT_C(x) ; EXT_C(x):
+#elif defined (__CYGWIN__) || defined (__MINGW32__)
+/* .type not supported for non-ELF targets. XXX: Check this in configure? */
+#define FUNCTION(x) .globl EXT_C(x) ; .def EXT_C(x); .scl 2; .type 32; .endef; EXT_C(x):
+#define VARIABLE(x) .globl EXT_C(x) ; .def EXT_C(x); .scl 2; .type 0; .endef; EXT_C(x):
+#elif defined (__arm__)
+#define FUNCTION(x) .globl EXT_C(x) ; .type EXT_C(x), %function ; EXT_C(x):
+#define VARIABLE(x) .globl EXT_C(x) ; .type EXT_C(x), %object ; EXT_C(x):
+#else
+#define FUNCTION(x) .globl EXT_C(x) ; .type EXT_C(x), @function ; EXT_C(x):
+#define VARIABLE(x) .globl EXT_C(x) ; .type EXT_C(x), @object ; EXT_C(x):
+#endif
+#endif
+
+/* Mark an exported symbol. */
+#ifndef GRUB_SYMBOL_GENERATOR
+# define EXPORT_FUNC(x) x
+# define EXPORT_VAR(x) x
+#endif /* ! GRUB_SYMBOL_GENERATOR */
+
+#endif /* ! GRUB_SYMBOL_HEADER */
diff --git a/i386/grub/time.h b/i386/grub/time.h
new file mode 100644
index 00000000..64ac99a1
--- /dev/null
+++ b/i386/grub/time.h
@@ -0,0 +1,46 @@
+/*
+ * GRUB -- GRand Unified Bootloader
+ * Copyright (C) 2007, 2008 Free Software Foundation, Inc.
+ *
+ * GRUB is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * GRUB is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GRUB. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef KERNEL_TIME_HEADER
+#define KERNEL_TIME_HEADER 1
+
+#include <grub/types.h>
+#include <grub/symbol.h>
+#ifndef GRUB_MACHINE_EMU
+#include <grub/cpu/time.h>
+#else
+static inline void
+grub_cpu_idle(void)
+{
+}
+#endif
+
+void EXPORT_FUNC(grub_millisleep) (grub_uint32_t ms);
+grub_uint64_t EXPORT_FUNC(grub_get_time_ms) (void);
+
+grub_uint64_t grub_rtc_get_time_ms (void);
+
+static __inline void
+grub_sleep (grub_uint32_t s)
+{
+ grub_millisleep (1000 * s);
+}
+
+void grub_install_get_time_ms (grub_uint64_t (*get_time_ms_func) (void));
+
+#endif /* ! KERNEL_TIME_HEADER */
diff --git a/i386/grub/types.h b/i386/grub/types.h
new file mode 100644
index 00000000..79f765c6
--- /dev/null
+++ b/i386/grub/types.h
@@ -0,0 +1,325 @@
+/*
+ * GRUB -- GRand Unified Bootloader
+ * Copyright (C) 2002,2005,2006,2007,2008,2009 Free Software Foundation, Inc.
+ *
+ * GRUB is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * GRUB is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GRUB. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef GRUB_TYPES_HEADER
+#define GRUB_TYPES_HEADER 1
+
+#include <config.h>
+#ifndef GRUB_UTIL
+#include <grub/cpu/types.h>
+#endif
+
+#ifdef __MINGW32__
+#define GRUB_PACKED __attribute__ ((packed,gcc_struct))
+#else
+#define GRUB_PACKED __attribute__ ((packed))
+#endif
+
+#ifdef GRUB_BUILD
+# define GRUB_CPU_SIZEOF_VOID_P BUILD_SIZEOF_VOID_P
+# define GRUB_CPU_SIZEOF_LONG BUILD_SIZEOF_LONG
+# if BUILD_WORDS_BIGENDIAN
+# define GRUB_CPU_WORDS_BIGENDIAN 1
+# else
+# undef GRUB_CPU_WORDS_BIGENDIAN
+# endif
+#elif defined (GRUB_UTIL)
+# define GRUB_CPU_SIZEOF_VOID_P SIZEOF_VOID_P
+# define GRUB_CPU_SIZEOF_LONG SIZEOF_LONG
+# ifdef WORDS_BIGENDIAN
+# define GRUB_CPU_WORDS_BIGENDIAN 1
+# else
+# undef GRUB_CPU_WORDS_BIGENDIAN
+# endif
+#else /* ! GRUB_UTIL */
+# define GRUB_CPU_SIZEOF_VOID_P GRUB_TARGET_SIZEOF_VOID_P
+# define GRUB_CPU_SIZEOF_LONG GRUB_TARGET_SIZEOF_LONG
+# ifdef GRUB_TARGET_WORDS_BIGENDIAN
+# define GRUB_CPU_WORDS_BIGENDIAN 1
+# else
+# undef GRUB_CPU_WORDS_BIGENDIAN
+# endif
+#endif /* ! GRUB_UTIL */
+
+#if GRUB_CPU_SIZEOF_VOID_P != 4 && GRUB_CPU_SIZEOF_VOID_P != 8
+# error "This architecture is not supported because sizeof(void *) != 4 and sizeof(void *) != 8"
+#endif
+
+#if GRUB_CPU_SIZEOF_LONG != 4 && GRUB_CPU_SIZEOF_LONG != 8
+# error "This architecture is not supported because sizeof(long) != 4 and sizeof(long) != 8"
+#endif
+
+#if !defined (GRUB_UTIL) && !defined (GRUB_TARGET_WORDSIZE)
+# if GRUB_TARGET_SIZEOF_VOID_P == 4
+# define GRUB_TARGET_WORDSIZE 32
+# elif GRUB_TARGET_SIZEOF_VOID_P == 8
+# define GRUB_TARGET_WORDSIZE 64
+# endif
+#endif
+
+/* Define various wide integers. */
+typedef signed char grub_int8_t;
+typedef short grub_int16_t;
+typedef int grub_int32_t;
+#if GRUB_CPU_SIZEOF_LONG == 8
+typedef long grub_int64_t;
+#else
+typedef long long grub_int64_t;
+#endif
+
+typedef unsigned char grub_uint8_t;
+typedef unsigned short grub_uint16_t;
+typedef unsigned grub_uint32_t;
+# define PRIxGRUB_UINT32_T "x"
+# define PRIuGRUB_UINT32_T "u"
+#if GRUB_CPU_SIZEOF_LONG == 8
+typedef unsigned long grub_uint64_t;
+# define PRIxGRUB_UINT64_T "lx"
+# define PRIuGRUB_UINT64_T "lu"
+#else
+typedef unsigned long long grub_uint64_t;
+# define PRIxGRUB_UINT64_T "llx"
+# define PRIuGRUB_UINT64_T "llu"
+#endif
+
+/* Misc types. */
+
+#if GRUB_CPU_SIZEOF_VOID_P == 8
+typedef grub_uint64_t grub_addr_t;
+typedef grub_uint64_t grub_size_t;
+typedef grub_int64_t grub_ssize_t;
+
+# define GRUB_SIZE_MAX 18446744073709551615UL
+
+# if GRUB_CPU_SIZEOF_LONG == 8
+# define PRIxGRUB_SIZE "lx"
+# define PRIxGRUB_ADDR "lx"
+# define PRIuGRUB_SIZE "lu"
+# define PRIdGRUB_SSIZE "ld"
+# else
+# define PRIxGRUB_SIZE "llx"
+# define PRIxGRUB_ADDR "llx"
+# define PRIuGRUB_SIZE "llu"
+# define PRIdGRUB_SSIZE "lld"
+# endif
+#else
+typedef grub_uint32_t grub_addr_t;
+typedef grub_uint32_t grub_size_t;
+typedef grub_int32_t grub_ssize_t;
+
+# define GRUB_SIZE_MAX 4294967295UL
+
+# define PRIxGRUB_SIZE "x"
+# define PRIxGRUB_ADDR "x"
+# define PRIuGRUB_SIZE "u"
+# define PRIdGRUB_SSIZE "d"
+#endif
+
+#define GRUB_UCHAR_MAX 0xFF
+#define GRUB_USHRT_MAX 65535
+#define GRUB_SHRT_MAX 0x7fff
+#define GRUB_UINT_MAX 4294967295U
+#define GRUB_INT_MAX 0x7fffffff
+#define GRUB_INT32_MIN (-2147483647 - 1)
+#define GRUB_INT32_MAX 2147483647
+
+#if GRUB_CPU_SIZEOF_LONG == 8
+# define GRUB_ULONG_MAX 18446744073709551615UL
+# define GRUB_LONG_MAX 9223372036854775807L
+# define GRUB_LONG_MIN (-9223372036854775807L - 1)
+#else
+# define GRUB_ULONG_MAX 4294967295UL
+# define GRUB_LONG_MAX 2147483647L
+# define GRUB_LONG_MIN (-2147483647L - 1)
+#endif
+
+typedef grub_uint64_t grub_properly_aligned_t;
+
+#define GRUB_PROPERLY_ALIGNED_ARRAY(name, size) grub_properly_aligned_t name[((size) + sizeof (grub_properly_aligned_t) - 1) / sizeof (grub_properly_aligned_t)]
+
+/* The type for representing a file offset. */
+typedef grub_uint64_t grub_off_t;
+
+/* The type for representing a disk block address. */
+typedef grub_uint64_t grub_disk_addr_t;
+
+/* Byte-orders. */
+static inline grub_uint16_t grub_swap_bytes16(grub_uint16_t _x)
+{
+ return (grub_uint16_t) ((_x << 8) | (_x >> 8));
+}
+
+#define grub_swap_bytes16_compile_time(x) ((((x) & 0xff) << 8) | (((x) & 0xff00) >> 8))
+#define grub_swap_bytes32_compile_time(x) ((((x) & 0xff) << 24) | (((x) & 0xff00) << 8) | (((x) & 0xff0000) >> 8) | (((x) & 0xff000000UL) >> 24))
+#define grub_swap_bytes64_compile_time(x) \
+({ \
+ grub_uint64_t _x = (x); \
+ (grub_uint64_t) ((_x << 56) \
+ | ((_x & (grub_uint64_t) 0xFF00ULL) << 40) \
+ | ((_x & (grub_uint64_t) 0xFF0000ULL) << 24) \
+ | ((_x & (grub_uint64_t) 0xFF000000ULL) << 8) \
+ | ((_x & (grub_uint64_t) 0xFF00000000ULL) >> 8) \
+ | ((_x & (grub_uint64_t) 0xFF0000000000ULL) >> 24) \
+ | ((_x & (grub_uint64_t) 0xFF000000000000ULL) >> 40) \
+ | (_x >> 56)); \
+})
+
+#if defined(__GNUC__) && (__GNUC__ > 3) && (__GNUC__ > 4 || __GNUC_MINOR__ >= 3)
+static inline grub_uint32_t grub_swap_bytes32(grub_uint32_t x)
+{
+ return __builtin_bswap32(x);
+}
+
+static inline grub_uint64_t grub_swap_bytes64(grub_uint64_t x)
+{
+ return __builtin_bswap64(x);
+}
+#else /* not gcc 4.3 or newer */
+static inline grub_uint32_t grub_swap_bytes32(grub_uint32_t _x)
+{
+ return ((_x << 24)
+ | ((_x & (grub_uint32_t) 0xFF00UL) << 8)
+ | ((_x & (grub_uint32_t) 0xFF0000UL) >> 8)
+ | (_x >> 24));
+}
+
+static inline grub_uint64_t grub_swap_bytes64(grub_uint64_t _x)
+{
+ return ((_x << 56)
+ | ((_x & (grub_uint64_t) 0xFF00ULL) << 40)
+ | ((_x & (grub_uint64_t) 0xFF0000ULL) << 24)
+ | ((_x & (grub_uint64_t) 0xFF000000ULL) << 8)
+ | ((_x & (grub_uint64_t) 0xFF00000000ULL) >> 8)
+ | ((_x & (grub_uint64_t) 0xFF0000000000ULL) >> 24)
+ | ((_x & (grub_uint64_t) 0xFF000000000000ULL) >> 40)
+ | (_x >> 56));
+}
+#endif /* not gcc 4.3 or newer */
+
+#ifdef GRUB_CPU_WORDS_BIGENDIAN
+# define grub_cpu_to_le16(x) grub_swap_bytes16(x)
+# define grub_cpu_to_le32(x) grub_swap_bytes32(x)
+# define grub_cpu_to_le64(x) grub_swap_bytes64(x)
+# define grub_le_to_cpu16(x) grub_swap_bytes16(x)
+# define grub_le_to_cpu32(x) grub_swap_bytes32(x)
+# define grub_le_to_cpu64(x) grub_swap_bytes64(x)
+# define grub_cpu_to_be16(x) ((grub_uint16_t) (x))
+# define grub_cpu_to_be32(x) ((grub_uint32_t) (x))
+# define grub_cpu_to_be64(x) ((grub_uint64_t) (x))
+# define grub_be_to_cpu16(x) ((grub_uint16_t) (x))
+# define grub_be_to_cpu32(x) ((grub_uint32_t) (x))
+# define grub_be_to_cpu64(x) ((grub_uint64_t) (x))
+# define grub_cpu_to_be16_compile_time(x) ((grub_uint16_t) (x))
+# define grub_cpu_to_be32_compile_time(x) ((grub_uint32_t) (x))
+# define grub_cpu_to_be64_compile_time(x) ((grub_uint64_t) (x))
+# define grub_be_to_cpu64_compile_time(x) ((grub_uint64_t) (x))
+# define grub_cpu_to_le32_compile_time(x) grub_swap_bytes32_compile_time(x)
+# define grub_cpu_to_le64_compile_time(x) grub_swap_bytes64_compile_time(x)
+# define grub_cpu_to_le16_compile_time(x) grub_swap_bytes16_compile_time(x)
+#else /* ! WORDS_BIGENDIAN */
+# define grub_cpu_to_le16(x) ((grub_uint16_t) (x))
+# define grub_cpu_to_le32(x) ((grub_uint32_t) (x))
+# define grub_cpu_to_le64(x) ((grub_uint64_t) (x))
+# define grub_le_to_cpu16(x) ((grub_uint16_t) (x))
+# define grub_le_to_cpu32(x) ((grub_uint32_t) (x))
+# define grub_le_to_cpu64(x) ((grub_uint64_t) (x))
+# define grub_cpu_to_be16(x) grub_swap_bytes16(x)
+# define grub_cpu_to_be32(x) grub_swap_bytes32(x)
+# define grub_cpu_to_be64(x) grub_swap_bytes64(x)
+# define grub_be_to_cpu16(x) grub_swap_bytes16(x)
+# define grub_be_to_cpu32(x) grub_swap_bytes32(x)
+# define grub_be_to_cpu64(x) grub_swap_bytes64(x)
+# define grub_cpu_to_be16_compile_time(x) grub_swap_bytes16_compile_time(x)
+# define grub_cpu_to_be32_compile_time(x) grub_swap_bytes32_compile_time(x)
+# define grub_cpu_to_be64_compile_time(x) grub_swap_bytes64_compile_time(x)
+# define grub_be_to_cpu64_compile_time(x) grub_swap_bytes64_compile_time(x)
+# define grub_cpu_to_le16_compile_time(x) ((grub_uint16_t) (x))
+# define grub_cpu_to_le32_compile_time(x) ((grub_uint32_t) (x))
+# define grub_cpu_to_le64_compile_time(x) ((grub_uint64_t) (x))
+
+#endif /* ! WORDS_BIGENDIAN */
+
+static inline grub_uint16_t grub_get_unaligned16 (const void *ptr)
+{
+ struct grub_unaligned_uint16_t
+ {
+ grub_uint16_t d;
+ } GRUB_PACKED;
+ const struct grub_unaligned_uint16_t *dd
+ = (const struct grub_unaligned_uint16_t *) ptr;
+ return dd->d;
+}
+
+static inline void grub_set_unaligned16 (void *ptr, grub_uint16_t val)
+{
+ struct grub_unaligned_uint16_t
+ {
+ grub_uint16_t d;
+ } GRUB_PACKED;
+ struct grub_unaligned_uint16_t *dd = (struct grub_unaligned_uint16_t *) ptr;
+ dd->d = val;
+}
+
+static inline grub_uint32_t grub_get_unaligned32 (const void *ptr)
+{
+ struct grub_unaligned_uint32_t
+ {
+ grub_uint32_t d;
+ } GRUB_PACKED;
+ const struct grub_unaligned_uint32_t *dd
+ = (const struct grub_unaligned_uint32_t *) ptr;
+ return dd->d;
+}
+
+static inline void grub_set_unaligned32 (void *ptr, grub_uint32_t val)
+{
+ struct grub_unaligned_uint32_t
+ {
+ grub_uint32_t d;
+ } GRUB_PACKED;
+ struct grub_unaligned_uint32_t *dd = (struct grub_unaligned_uint32_t *) ptr;
+ dd->d = val;
+}
+
+struct grub_unaligned_uint64
+{
+ grub_uint64_t val;
+} GRUB_PACKED;
+
+typedef struct grub_unaligned_uint64 grub_unaligned_uint64_t;
+
+static inline grub_uint64_t grub_get_unaligned64 (const void *ptr)
+{
+ const struct grub_unaligned_uint64 *dd
+ = (const struct grub_unaligned_uint64 *) ptr;
+ return dd->val;
+}
+
+static inline void grub_set_unaligned64 (void *ptr, grub_uint64_t val)
+{
+ struct grub_unaligned_uint64_t
+ {
+ grub_uint64_t d;
+ } GRUB_PACKED;
+ struct grub_unaligned_uint64_t *dd = (struct grub_unaligned_uint64_t *) ptr;
+ dd->d = val;
+}
+
+#define GRUB_CHAR_BIT 8
+
+#endif /* ! GRUB_TYPES_HEADER */
diff --git a/i386/i386/ast_check.c b/i386/i386/ast_check.c
index 982c7053..f3e1c350 100644
--- a/i386/i386/ast_check.c
+++ b/i386/i386/ast_check.c
@@ -37,23 +37,17 @@
/*
* Initialize for remote invocation of ast_check.
*/
-init_ast_check(processor)
- processor_t processor;
+void init_ast_check(processor)
+ const processor_t processor;
{
-#ifdef lint
- processor++;
-#endif /* lint */
}
/*
* Cause remote invocation of ast_check. Caller is at splsched().
*/
-cause_ast_check(processor)
- processor_t processor;
+void cause_ast_check(processor)
+ const processor_t processor;
{
-#ifdef lint
- processor++;
-#endif /* lint */
}
#endif /* NCPUS > 1 */
diff --git a/i386/i386/cpu.h b/i386/i386/cpu.h
new file mode 100644
index 00000000..1bf40dce
--- /dev/null
+++ b/i386/i386/cpu.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2010-2014 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _X86_CPU_H
+#define _X86_CPU_H
+
+#include <kern/macros.h>
+
+/*
+ * EFLAGS register flags.
+ */
+#define CPU_EFL_ONE 0x00000002
+#define CPU_EFL_IF 0x00000200
+
+/*
+ * Return the content of the EFLAGS register.
+ *
+ * Implies a compiler barrier.
+ */
+static __always_inline unsigned long
+cpu_get_eflags(void)
+{
+ unsigned long eflags;
+
+ asm volatile("pushf\n"
+ "pop %0\n"
+ : "=r" (eflags)
+ : : "memory");
+
+ return eflags;
+}
+
+/*
+ * Enable local interrupts.
+ *
+ * Implies a compiler barrier.
+ */
+static __always_inline void
+cpu_intr_enable(void)
+{
+ asm volatile("sti" : : : "memory");
+}
+
+/*
+ * Disable local interrupts.
+ *
+ * Implies a compiler barrier.
+ */
+static __always_inline void
+cpu_intr_disable(void)
+{
+ asm volatile("cli" : : : "memory");
+}
+
+/*
+ * Restore the content of the EFLAGS register, possibly enabling interrupts.
+ *
+ * Implies a compiler barrier.
+ */
+static __always_inline void
+cpu_intr_restore(unsigned long flags)
+{
+ asm volatile("push %0\n"
+ "popf\n"
+ : : "r" (flags)
+ : "memory");
+}
+
+/*
+ * Disable local interrupts, returning the previous content of the EFLAGS
+ * register.
+ *
+ * Implies a compiler barrier.
+ */
+static __always_inline void
+cpu_intr_save(unsigned long *flags)
+{
+ *flags = cpu_get_eflags();
+ cpu_intr_disable();
+}
+
+/*
+ * Return true if interrupts are enabled.
+ *
+ * Implies a compiler barrier.
+ */
+static __always_inline int
+cpu_intr_enabled(void)
+{
+ unsigned long eflags;
+
+ eflags = cpu_get_eflags();
+ return (eflags & CPU_EFL_IF) ? 1 : 0;
+}
+
+#endif /* _X86_CPU_H */
diff --git a/i386/i386/db_disasm.c b/i386/i386/db_disasm.c
index e15293b0..4afbcf3e 100644
--- a/i386/i386/db_disasm.c
+++ b/i386/i386/db_disasm.c
@@ -950,10 +950,10 @@ db_read_address(
void
db_print_address(
- char * seg,
- int size,
- struct i_addr *addrp,
- task_t task)
+ const char * seg,
+ int size,
+ const struct i_addr *addrp,
+ task_t task)
{
if (addrp->is_reg) {
db_printf("%s", db_reg[size][addrp->disp]);
@@ -986,7 +986,7 @@ db_disasm_esc(
int inst,
int short_addr,
int size,
- char * seg,
+ const char * seg,
task_t task)
{
int regmodrm;
diff --git a/i386/i386/db_interface.c b/i386/i386/db_interface.c
index 90ca22dc..b3fac0bb 100644
--- a/i386/i386/db_interface.c
+++ b/i386/i386/db_interface.c
@@ -27,6 +27,7 @@
* Interface to new debugger.
*/
+#include <string.h>
#include <sys/reboot.h>
#include <vm/pmap.h>
@@ -43,6 +44,7 @@
#include "vm_param.h"
#include <vm/vm_map.h>
+#include <vm/vm_fault.h>
#include <kern/cpu_number.h>
#include <kern/printf.h>
#include <kern/thread.h>
@@ -59,8 +61,10 @@
#if MACH_KDB
/* Whether the kernel uses any debugging register. */
-static int kernel_dr;
+static boolean_t kernel_dr;
#endif
+/* Whether the current debug registers are zero. */
+static boolean_t zero_dr;
void db_load_context(pcb_t pcb)
{
@@ -72,13 +76,20 @@ void db_load_context(pcb_t pcb)
return;
}
#endif
+ /* Else set user debug registers, if any */
+ unsigned int *dr = pcb->ims.ids.dr;
+ boolean_t will_zero_dr = !dr[0] && !dr[1] && !dr[2] && !dr[3] && !dr[7];
+
+ if (!(zero_dr && will_zero_dr))
+ {
+ set_dr0(dr[0]);
+ set_dr1(dr[1]);
+ set_dr2(dr[2]);
+ set_dr3(dr[3]);
+ set_dr7(dr[7]);
+ zero_dr = will_zero_dr;
+ }
- /* Else set user debug registers */
- set_dr0(pcb->ims.ids.dr[0]);
- set_dr1(pcb->ims.ids.dr[1]);
- set_dr2(pcb->ims.ids.dr[2]);
- set_dr3(pcb->ims.ids.dr[3]);
- set_dr7(pcb->ims.ids.dr[7]);
#if MACH_KDB
splx(s);
#endif
@@ -135,7 +146,7 @@ void db_dr (
splx(s);
return;
}
- kernel_dr = 1;
+ kernel_dr = TRUE;
/* Clear user debugging registers */
set_dr7(0);
set_dr0(0);
@@ -161,7 +172,9 @@ void db_dr (
if (kernel_dr) {
if (!ids.dr[0] && !ids.dr[1] && !ids.dr[2] && !ids.dr[3]) {
/* Not used any more, switch back to user debugging registers */
- kernel_dr = 0;
+ set_dr7 (0);
+ kernel_dr = FALSE;
+ zero_dr = TRUE;
db_load_context(current_thread()->pcb);
}
}
@@ -170,12 +183,12 @@ void db_dr (
boolean_t
db_set_hw_watchpoint(
- db_watchpoint_t watch,
- unsigned num)
+ const db_watchpoint_t watch,
+ unsigned num)
{
vm_size_t size = watch->hiaddr - watch->loaddr;
db_addr_t addr = watch->loaddr;
- unsigned int kern_addr;
+ vm_offset_t kern_addr;
if (num >= 4)
return FALSE;
@@ -214,8 +227,9 @@ db_clear_hw_watchpoint(
* Print trap reason.
*/
void
-kdbprinttrap(type, code)
- int type, code;
+kdbprinttrap(
+ int type,
+ int code)
{
printf("kernel: %s (%d), code=%x\n",
trap_name(type), type, code);
@@ -232,7 +246,7 @@ boolean_t
kdb_trap(
int type,
int code,
- register struct i386_saved_state *regs)
+ struct i386_saved_state *regs)
{
spl_t s;
@@ -408,12 +422,12 @@ boolean_t db_no_vm_fault = TRUE;
int
db_user_to_kernel_address(
- task_t task,
+ const task_t task,
vm_offset_t addr,
- unsigned int *kaddr,
+ vm_offset_t *kaddr,
int flag)
{
- register pt_entry_t *ptp;
+ pt_entry_t *ptp;
boolean_t faulted = FALSE;
retry:
@@ -437,7 +451,7 @@ db_user_to_kernel_address(
}
return(-1);
}
- *kaddr = (unsigned)ptetokv(*ptp) + (addr & (INTEL_PGBYTES-1));
+ *kaddr = ptetokv(*ptp) + (addr & (INTEL_PGBYTES-1));
return(0);
}
@@ -448,13 +462,13 @@ db_user_to_kernel_address(
void
db_read_bytes(
vm_offset_t addr,
- register int size,
- register char *data,
+ int size,
+ char *data,
task_t task)
{
- register char *src;
- register int n;
- unsigned kern_addr;
+ char *src;
+ int n;
+ vm_offset_t kern_addr;
src = (char *)addr;
if ((addr >= VM_MIN_KERNEL_ADDRESS && addr < VM_MAX_KERNEL_ADDRESS) || task == TASK_NULL) {
@@ -491,19 +505,18 @@ db_read_bytes(
void
db_write_bytes(
vm_offset_t addr,
- register int size,
- register char *data,
+ int size,
+ char *data,
task_t task)
{
- register char *dst;
+ char *dst;
- register pt_entry_t *ptep0 = 0;
+ pt_entry_t *ptep0 = 0;
pt_entry_t oldmap0 = 0;
vm_offset_t addr1;
- register pt_entry_t *ptep1 = 0;
+ pt_entry_t *ptep1 = 0;
pt_entry_t oldmap1 = 0;
extern char etext;
- void db_write_bytes_user_space();
if ((addr < VM_MIN_KERNEL_ADDRESS) ^
((addr + size) <= VM_MIN_KERNEL_ADDRESS)) {
@@ -560,13 +573,13 @@ db_write_bytes(
void
db_write_bytes_user_space(
vm_offset_t addr,
- register int size,
- register char *data,
+ int size,
+ char *data,
task_t task)
{
- register char *dst;
- register int n;
- unsigned kern_addr;
+ char *dst;
+ int n;
+ vm_offset_t kern_addr;
while (size > 0) {
if (db_user_to_kernel_address(task, addr, &kern_addr, 1) < 0)
@@ -585,7 +598,7 @@ db_write_bytes_user_space(
boolean_t
db_check_access(
vm_offset_t addr,
- register int size,
+ int size,
task_t task)
{
int n;
@@ -616,7 +629,7 @@ boolean_t
db_phys_eq(
task_t task1,
vm_offset_t addr1,
- task_t task2,
+ const task_t task2,
vm_offset_t addr2)
{
vm_offset_t kern_addr1, kern_addr2;
@@ -639,16 +652,19 @@ db_phys_eq(
#define DB_USER_STACK_ADDR (VM_MIN_KERNEL_ADDRESS)
#define DB_NAME_SEARCH_LIMIT (DB_USER_STACK_ADDR-(INTEL_PGBYTES*3))
+#define GNU
+
+#ifndef GNU
static boolean_t
db_search_null(
- task_t task,
+ const task_t task,
vm_offset_t *svaddr,
vm_offset_t evaddr,
vm_offset_t *skaddr,
int flag)
{
- register unsigned vaddr;
- register unsigned *kaddr;
+ unsigned vaddr;
+ unsigned *kaddr;
kaddr = (unsigned *)*skaddr;
for (vaddr = *svaddr; vaddr > evaddr; ) {
@@ -669,13 +685,12 @@ db_search_null(
}
return FALSE;
}
-
-#define GNU
+#endif /* GNU */
#ifdef GNU
static boolean_t
looks_like_command(
- task_t task,
+ const task_t task,
char* kaddr)
{
char *c;
@@ -715,19 +730,19 @@ looks_like_command(
return TRUE;
}
-#endif
+#endif /* GNU */
void
db_task_name(
- task_t task)
+ const task_t task)
{
- register char *p;
- register int n;
- unsigned vaddr, kaddr;
+ char *p;
+ int n;
+ vm_offset_t vaddr, kaddr;
unsigned sp;
- if (task->map->pmap == kernel_pmap) {
- db_printf(DB_GNUMACH_TASK_NAME);
+ if (task->name[0]) {
+ db_printf("%s", task->name);
return;
}
@@ -758,12 +773,12 @@ db_task_name(
vaddr = (sp & ~(INTEL_PGBYTES - 1)) + INTEL_PGBYTES;
while (1) {
if (db_user_to_kernel_address(task, vaddr, &kaddr, 0) < 0)
- return FALSE;
+ return;
if (looks_like_command(task, (char*) kaddr))
break;
vaddr += INTEL_PGBYTES;
}
-#else
+#else /* GNU */
vaddr = DB_USER_STACK_ADDR;
kaddr = 0;
@@ -781,18 +796,18 @@ db_task_name(
db_printf(DB_NULL_TASK_NAME);
return;
}
-#endif
+#endif /* GNU */
ok:
n = DB_TASK_NAME_LEN-1;
#ifdef GNU
p = (char *)kaddr;
for (; n > 0; vaddr++, p++, n--) {
-#else
+#else /* GNU */
p = (char *)kaddr + sizeof(unsigned);
for (vaddr += sizeof(int); vaddr < DB_USER_STACK_ADDR && n > 0;
vaddr++, p++, n--) {
-#endif
+#endif /* GNU */
if (vaddr % INTEL_PGBYTES == 0) {
(void)db_user_to_kernel_address(task, vaddr, &kaddr, 0);
p = (char*)kaddr;
diff --git a/i386/i386/db_interface.h b/i386/i386/db_interface.h
index 3f6821da..8d7daeae 100644
--- a/i386/i386/db_interface.h
+++ b/i386/i386/db_interface.h
@@ -25,6 +25,7 @@
#include <kern/task.h>
#include <machine/thread.h>
#include <ddb/db_watch.h>
+#include <ddb/db_variables.h>
extern boolean_t kdb_trap (
int type,
@@ -57,7 +58,7 @@ extern boolean_t db_phys_eq (
extern int db_user_to_kernel_address(
task_t task,
vm_offset_t addr,
- unsigned int *kaddr,
+ vm_offset_t *kaddr,
int flag);
extern void db_task_name (task_t task);
@@ -88,6 +89,26 @@ extern void db_dr (
int type,
int len,
int persistence);
+
+extern void
+db_stack_trace_cmd(
+ db_expr_t addr,
+ boolean_t have_addr,
+ db_expr_t count,
+ const char *modif);
+
+extern void
+db_halt_cpu(void);
+extern void
+db_reset_cpu(void);
+
+void
+db_i386_reg_value(
+ struct db_variable *vp,
+ db_expr_t *valuep,
+ int flag,
+ struct db_var_aux_param *ap);
+
#endif
extern void db_get_debug_state(
@@ -99,4 +120,13 @@ extern kern_return_t db_set_debug_state(
extern void db_load_context(pcb_t pcb);
+extern void cnpollc(boolean_t on);
+
+void
+db_write_bytes_user_space(
+ vm_offset_t addr,
+ int size,
+ char *data,
+ task_t task);
+
#endif /* _I386_DB_INTERFACE_H_ */
diff --git a/i386/i386/db_machdep.h b/i386/i386/db_machdep.h
index c6ea3ca9..ae1f9c09 100644
--- a/i386/i386/db_machdep.h
+++ b/i386/i386/db_machdep.h
@@ -93,7 +93,6 @@ db_regs_t ddb_regs; /* register state */
#define DB_TASK_NAME_TITLE "COMMAND "
#define DB_TASK_NAME_LEN 23
#define DB_NULL_TASK_NAME "? "
-#define DB_GNUMACH_TASK_NAME "gnumach "
/* macro for checking if a thread has used floating-point */
diff --git a/i386/i386/db_trace.c b/i386/i386/db_trace.c
index 4e3bea39..c8789e71 100644
--- a/i386/i386/db_trace.c
+++ b/i386/i386/db_trace.c
@@ -35,6 +35,9 @@
#include <machine/db_machdep.h>
#include <machine/machspl.h>
+#include <machine/db_interface.h>
+#include <machine/db_trace.h>
+#include <i386at/model_dep.h>
#include <ddb/db_access.h>
#include <ddb/db_command.h>
@@ -45,13 +48,6 @@
#include "trap.h"
-long
-db_i386_reg_value(
- struct db_variable *vp,
- db_expr_t *valuep,
- int flag,
- struct db_var_aux_param *ap); /* forward */
-
/*
* Machine register set.
*/
@@ -112,10 +108,10 @@ struct i386_kregs {
long *
db_lookup_i386_kreg(
- char *name,
- int *kregp)
+ const char *name,
+ const long *kregp)
{
- register struct i386_kregs *kp;
+ struct i386_kregs *kp;
for (kp = i386_kregs; kp->name; kp++) {
if (strcmp(name, kp->name) == 0)
@@ -124,7 +120,7 @@ db_lookup_i386_kreg(
return 0;
}
-long
+void
db_i386_reg_value(
struct db_variable *vp,
db_expr_t *valuep,
@@ -133,8 +129,7 @@ db_i386_reg_value(
{
long *dp = 0;
db_expr_t null_reg = 0;
- register thread_t thread = ap->thread;
- extern unsigned int_stack_high;
+ thread_t thread = ap->thread;
if (db_option(ap->modif, 'u')) {
if (thread == THREAD_NULL) {
@@ -144,7 +139,7 @@ db_i386_reg_value(
if (thread == current_thread()) {
if (ddb_regs.cs & 0x3)
dp = vp->valuep;
- else if (ddb_regs.ebp < int_stack_high)
+ else if (ON_INT_STACK(ddb_regs.ebp))
db_error("cannot get/set user registers in nested interrupt\n");
}
} else {
@@ -205,7 +200,7 @@ db_find_trace_symbols(void)
/*
* Figure out how many arguments were passed into the frame at "fp".
*/
-int db_numargs_default = 5;
+const int db_numargs_default = 5;
int
db_numargs(
@@ -264,7 +259,7 @@ db_nextframe(
struct i386_frame **fp, /* in/out */
db_addr_t *ip, /* out */
long frame_type, /* in */
- thread_t thread) /* in */
+ const thread_t thread) /* in */
{
struct i386_saved_state *saved_regs;
struct interrupt_frame *ifp;
@@ -318,14 +313,6 @@ db_nextframe(
}
}
-void
-db_i386_stack_trace(
- thread_t th,
- struct i386_frame *frame,
- db_addr_t callpc,
- db_expr_t count,
- int flags); /* forward */
-
#define F_USER_TRACE 1
#define F_TRACE_THREAD 2
@@ -334,7 +321,7 @@ db_stack_trace_cmd(
db_expr_t addr,
boolean_t have_addr,
db_expr_t count,
- char *modif)
+ const char *modif)
{
boolean_t trace_thread = FALSE;
struct i386_frame *frame;
@@ -343,8 +330,8 @@ db_stack_trace_cmd(
thread_t th;
{
- register char *cp = modif;
- register char c;
+ const char *cp = modif;
+ char c;
while ((c = *cp++) != 0) {
if (c == 't')
@@ -361,7 +348,7 @@ db_stack_trace_cmd(
} else if (trace_thread) {
if (have_addr) {
th = (thread_t) addr;
- if (!db_check_thread_address_valid((db_addr_t)th))
+ if (!db_check_thread_address_valid(th))
return;
} else {
th = db_default_thread;
@@ -381,10 +368,10 @@ db_stack_trace_cmd(
return;
}
if ((th->state & TH_SWAPPED) || th->kernel_stack == 0) {
- register struct i386_saved_state *iss = &th->pcb->iss;
+ struct i386_saved_state *iss = &th->pcb->iss;
db_printf("Continuation ");
- db_task_printsym((db_expr_t)th->swap_func,
+ db_task_printsym((db_addr_t)th->swap_func,
DB_STGY_PROC,
th->task);
db_printf("\n");
@@ -392,7 +379,7 @@ db_stack_trace_cmd(
frame = (struct i386_frame *) (iss->ebp);
callpc = (db_addr_t) (iss->eip);
} else {
- register struct i386_kernel_state *iks;
+ struct i386_kernel_state *iks;
iks = STACK_IKS(th->kernel_stack);
frame = (struct i386_frame *) (iks->k_ebp);
callpc = (db_addr_t) (iks->k_eip);
@@ -412,7 +399,7 @@ db_stack_trace_cmd(
void
db_i386_stack_trace(
- thread_t th,
+ const thread_t th,
struct i386_frame *frame,
db_addr_t callpc,
db_expr_t count,
@@ -438,18 +425,18 @@ db_i386_stack_trace(
if (!db_trace_symbols_found)
db_find_trace_symbols();
- if (!INKERNEL((unsigned long)callpc) && !INKERNEL((unsigned long)frame)) {
+ if (!INKERNEL(callpc) && !INKERNEL(frame)) {
db_printf(">>>>> user space <<<<<\n");
user_frame++;
}
lastframe = 0;
while (count-- && frame != 0) {
- register int narg;
+ int narg;
char * name;
db_expr_t offset;
- if (INKERNEL((unsigned long)callpc) && user_frame == 0) {
+ if (INKERNEL(callpc) && user_frame == 0) {
db_addr_t call_func = 0;
db_sym_t sym_tmp;
@@ -474,7 +461,7 @@ db_i386_stack_trace(
frame_type = 0;
narg = db_numargs(frame, task);
}
- } else if (INKERNEL((unsigned long)callpc) ^ INKERNEL((unsigned long)frame)) {
+ } else if (INKERNEL(callpc) ^ INKERNEL(frame)) {
frame_type = 0;
narg = -1;
} else {
@@ -501,7 +488,7 @@ db_i386_stack_trace(
db_printf("...");
db_printf(")");
if (offset) {
- db_printf("+%x", offset);
+ db_printf("+0x%x", offset);
}
if (db_line_at_pc(0, &filename, &linenum, callpc)) {
db_printf(" [%s", filename);
@@ -519,7 +506,7 @@ db_i386_stack_trace(
break;
}
if (!INKERNEL(lastframe) ||
- (!INKERNEL((unsigned long)callpc) && !INKERNEL((unsigned long)frame)))
+ (!INKERNEL(callpc) && !INKERNEL(frame)))
user_frame++;
if (user_frame == 1) {
db_printf(">>>>> user space <<<<<\n");
@@ -546,6 +533,8 @@ db_find_kthread(
task_t task)
{
thread_t thread;
+ if (task == TASK_NULL)
+ task = db_current_task();
queue_iterate(&task->thread_list, thread, thread_t, thread_list) {
vm_offset_t usp = thread->pcb->iss.uesp/*ebp works*/;
@@ -571,18 +560,18 @@ static void db_cproc_state(
/* offsets in a cproc structure */
/* TODO: longs? */
-int db_cproc_next_offset = 0 * 4;
-int db_cproc_incarnation_offset = 1 * 4;
-int db_cproc_list_offset = 2 * 4;
-int db_cproc_wait_offset = 3 * 4;
-int db_cproc_context_offset = 5 * 4;
-int db_cproc_state_offset = 7 * 4;
-int db_cproc_stack_base_offset = 10 * 4 + sizeof(mach_msg_header_t);
-int db_cproc_stack_size_offset = 11 * 4 + sizeof(mach_msg_header_t);
+const int db_cproc_next_offset = 0 * 4;
+const int db_cproc_incarnation_offset = 1 * 4;
+const int db_cproc_list_offset = 2 * 4;
+const int db_cproc_wait_offset = 3 * 4;
+const int db_cproc_context_offset = 5 * 4;
+const int db_cproc_state_offset = 7 * 4;
+const int db_cproc_stack_base_offset = 10 * 4 + sizeof(mach_msg_header_t);
+const int db_cproc_stack_size_offset = 11 * 4 + sizeof(mach_msg_header_t);
/* offsets in a cproc_switch context structure */
-int db_cprocsw_framep_offset = 3 * 4;
-int db_cprocsw_pc_offset = 4 * 4;
+const int db_cprocsw_framep_offset = 3 * 4;
+const int db_cprocsw_pc_offset = 4 * 4;
#include <machine/setjmp.h>
@@ -646,7 +635,7 @@ void db_trace_cproc(
}
void db_all_cprocs(
- task_t task,
+ const task_t task,
db_expr_t cproc_list)
{
jmp_buf_t db_jmpbuf;
diff --git a/i386/i386/db_trace.h b/i386/i386/db_trace.h
new file mode 100644
index 00000000..604654c5
--- /dev/null
+++ b/i386/i386/db_trace.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _I386_DB_TRACE_H_
+#define _I386_DB_TRACE_H_
+
+struct i386_frame;
+
+void
+db_i386_stack_trace(
+ thread_t th,
+ struct i386_frame *frame,
+ db_addr_t callpc,
+ db_expr_t count,
+ int flags);
+
+#endif /* _I386_DB_TRACE_H_ */
diff --git a/i386/i386/debug.h b/i386/i386/debug.h
index f87b95bb..84397ba8 100644
--- a/i386/i386/debug.h
+++ b/i386/i386/debug.h
@@ -23,10 +23,12 @@
#ifndef _I386_DEBUG_
#define _I386_DEBUG_
+#ifndef __ASSEMBLER__
/* Dump a saved state.
Probably a good idea to have this around
even when DEBUG isn't turned on. */
-void dump_ss(struct i386_saved_state *st);
+void dump_ss(const struct i386_saved_state *st);
+#endif /* __ASSEMBLER__ */
#ifdef DEBUG
diff --git a/i386/i386/debug_i386.c b/i386/i386/debug_i386.c
index 937d7b4b..233caa72 100644
--- a/i386/i386/debug_i386.c
+++ b/i386/i386/debug_i386.c
@@ -27,7 +27,7 @@
#include "trap.h"
#include "debug.h"
-void dump_ss(struct i386_saved_state *st)
+void dump_ss(const struct i386_saved_state *st)
{
printf("Dump of i386_saved_state %p:\n", st);
printf("EAX %08lx EBX %08lx ECX %08lx EDX %08lx\n",
@@ -59,9 +59,8 @@ struct debug_trace_entry
struct debug_trace_entry debug_trace_buf[DEBUG_TRACE_LEN];
int debug_trace_pos;
-
void
-debug_trace_reset()
+debug_trace_reset(void)
{
int s = splhigh();
debug_trace_pos = 0;
@@ -89,7 +88,7 @@ print_entry(int i, int *col)
}
void
-debug_trace_dump()
+debug_trace_dump(void)
{
int s = splhigh();
int i;
@@ -129,21 +128,37 @@ debug_trace_dump()
splx(s);
}
-#include "syscall_sw.h"
+#include <kern/syscall_sw.h>
int syscall_trace = 0;
+task_t syscall_trace_task;
int
syscall_trace_print(int syscallvec, ...)
{
int syscallnum = syscallvec >> 4;
int i;
+ const mach_trap_t *trap = &mach_trap_table[syscallnum];
+
+ if (syscall_trace_task && syscall_trace_task != current_task())
+ goto out;
+
+ printf("0x%08x:0x%08x:%s(",
+ current_task(), current_thread(), trap->mach_trap_name);
+ for (i = 0; i < trap->mach_trap_arg_count; i++) {
+ unsigned long value = (&syscallvec)[1+i];
+ /* Use a crude heuristic to format pointers. */
+ if (value > 1024)
+ printf("0x%08x", value);
+ else
+ printf("%d", value);
+
+ if (i + 1 < trap->mach_trap_arg_count)
+ printf(", ");
+ }
+ printf(")\n");
- printf("syscall -%d:", syscallnum);
- for (i = 0; i < mach_trap_table[syscallnum].mach_trap_arg_count; i++)
- printf(" %08x", (&syscallvec)[1+i]);
- printf("\n");
-
+ out:
return syscallvec;
}
diff --git a/i386/i386/fpu.c b/i386/i386/fpu.c
index d35e3eff..6ee20150 100644
--- a/i386/i386/fpu.c
+++ b/i386/i386/fpu.c
@@ -53,6 +53,7 @@
#include <i386/pio.h>
#include <i386/pic.h>
#include <i386/locore.h>
+#include <i386/trap.h>
#include "cpu_number.h"
#if 0
@@ -68,15 +69,10 @@
#define ASSERT_IPL(L)
#endif
-extern void i386_exception();
-
int fp_kind = FP_387; /* 80387 present */
struct kmem_cache ifps_cache; /* cache for FPU save area */
static unsigned long mxcsr_feature_mask = 0xffffffff; /* Always AND user-provided mxcsr with this security mask */
-void fp_save(thread_t thread);
-void fp_load(thread_t thread);
-
#if NCPUS == 1
volatile thread_t fp_thread = THREAD_NULL;
/* thread whose state is in FPU */
@@ -105,7 +101,7 @@ volatile thread_t fp_intr_thread = THREAD_NULL;
* Called on each CPU.
*/
void
-init_fpu()
+init_fpu(void)
{
unsigned short status, control;
@@ -189,11 +185,11 @@ init_fpu()
* Initialize FP handling.
*/
void
-fpu_module_init()
+fpu_module_init(void)
{
kmem_cache_init(&ifps_cache, "i386_fpsave_state",
sizeof(struct i386_fpsave_state), 16,
- NULL, NULL, NULL, 0);
+ NULL, 0);
}
/*
@@ -201,8 +197,7 @@ fpu_module_init()
* Called only when thread terminating - no locking necessary.
*/
void
-fp_free(fps)
- struct i386_fpsave_state *fps;
+fp_free(struct i386_fpsave_state *fps)
{
ASSERT_IPL(SPL0);
#if NCPUS == 1
@@ -296,12 +291,12 @@ twd_fxsr_to_i387 (struct i386_xfp_save *fxsave)
*/
kern_return_t
fpu_set_state(thread, state)
- thread_t thread;
+ const thread_t thread;
struct i386_float_state *state;
{
- register pcb_t pcb = thread->pcb;
- register struct i386_fpsave_state *ifps;
- register struct i386_fpsave_state *new_ifps;
+ pcb_t pcb = thread->pcb;
+ struct i386_fpsave_state *ifps;
+ struct i386_fpsave_state *new_ifps;
ASSERT_IPL(SPL0);
if (fp_kind == FP_NO)
@@ -339,8 +334,8 @@ ASSERT_IPL(SPL0);
/*
* Valid state. Allocate the fp state if there is none.
*/
- register struct i386_fp_save *user_fp_state;
- register struct i386_fp_regs *user_fp_regs;
+ struct i386_fp_save *user_fp_state;
+ struct i386_fp_regs *user_fp_regs;
user_fp_state = (struct i386_fp_save *) &state->hw_state[0];
user_fp_regs = (struct i386_fp_regs *)
@@ -378,7 +373,7 @@ ASSERT_IPL(SPL0);
ifps->xfp_save_state.fp_dp = user_fp_state->fp_dp;
ifps->xfp_save_state.fp_ds = user_fp_state->fp_ds;
for (i=0; i<8; i++)
- memcpy(&ifps->xfp_save_state.fp_reg_word[i], &user_fp_regs[i], sizeof(user_fp_regs[i]));
+ memcpy(&ifps->xfp_save_state.fp_reg_word[i], &user_fp_regs->fp_reg_word[i], sizeof(user_fp_regs->fp_reg_word[i]));
} else {
ifps->fp_save_state.fp_control = user_fp_state->fp_control;
ifps->fp_save_state.fp_status = user_fp_state->fp_status;
@@ -407,11 +402,11 @@ ASSERT_IPL(SPL0);
*/
kern_return_t
fpu_get_state(thread, state)
- thread_t thread;
- register struct i386_float_state *state;
+ const thread_t thread;
+ struct i386_float_state *state;
{
- register pcb_t pcb = thread->pcb;
- register struct i386_fpsave_state *ifps;
+ pcb_t pcb = thread->pcb;
+ struct i386_fpsave_state *ifps;
ASSERT_IPL(SPL0);
if (fp_kind == FP_NO)
@@ -445,8 +440,8 @@ ASSERT_IPL(SPL0);
state->exc_status = 0;
{
- register struct i386_fp_save *user_fp_state;
- register struct i386_fp_regs *user_fp_regs;
+ struct i386_fp_save *user_fp_state;
+ struct i386_fp_regs *user_fp_regs;
state->initialized = ifps->fp_valid;
@@ -471,7 +466,7 @@ ASSERT_IPL(SPL0);
user_fp_state->fp_dp = ifps->xfp_save_state.fp_dp;
user_fp_state->fp_ds = ifps->xfp_save_state.fp_ds;
for (i=0; i<8; i++)
- memcpy(&user_fp_regs[i], &ifps->xfp_save_state.fp_reg_word[i], sizeof(user_fp_regs[i]));
+ memcpy(&user_fp_regs->fp_reg_word[i], &ifps->xfp_save_state.fp_reg_word[i], sizeof(user_fp_regs->fp_reg_word[i]));
} else {
user_fp_state->fp_control = ifps->fp_save_state.fp_control;
user_fp_state->fp_status = ifps->fp_save_state.fp_status;
@@ -499,31 +494,56 @@ ASSERT_IPL(SPL0);
*
* Use 53-bit precision.
*/
-void fpinit()
+static void fpinit(thread_t thread)
{
unsigned short control;
ASSERT_IPL(SPL0);
clear_ts();
fninit();
- fnstcw(&control);
- control &= ~(FPC_PC|FPC_RC); /* Clear precision & rounding control */
- control |= (FPC_PC_53 | /* Set precision */
- FPC_RC_RN | /* round-to-nearest */
- FPC_ZE | /* Suppress zero-divide */
- FPC_OE | /* and overflow */
- FPC_UE | /* underflow */
- FPC_IE | /* Allow NaNQs and +-INF */
- FPC_DE | /* Allow denorms as operands */
- FPC_PE); /* No trap for precision loss */
+ if (thread->pcb->init_control) {
+ control = thread->pcb->init_control;
+ }
+ else
+ {
+ fnstcw(&control);
+ control &= ~(FPC_PC|FPC_RC); /* Clear precision & rounding control */
+ control |= (FPC_PC_53 | /* Set precision */
+ FPC_RC_RN | /* round-to-nearest */
+ FPC_ZE | /* Suppress zero-divide */
+ FPC_OE | /* and overflow */
+ FPC_UE | /* underflow */
+ FPC_IE | /* Allow NaNQs and +-INF */
+ FPC_DE | /* Allow denorms as operands */
+ FPC_PE); /* No trap for precision loss */
+ }
fldcw(control);
}
/*
+ * Inherit FPU state from a parent to a child, if any
+ */
+void fpinherit(thread_t parent_thread, thread_t thread)
+{
+ pcb_t pcb = parent_thread->pcb;
+ struct i386_fpsave_state *ifps;
+
+ ifps = pcb->ims.ifps;
+ if (ifps) {
+ /* Parent does have a state, inherit it */
+ if (ifps->fp_valid == TRUE)
+ thread->pcb->init_control = ifps->fp_save_state.fp_control;
+ else
+ /* State is in the FPU, fetch from there */
+ fnstcw(&thread->pcb->init_control);
+ }
+}
+
+/*
* Coprocessor not present.
*/
void
-fpnoextflt()
+fpnoextflt(void)
{
/*
* Enable FPU use.
@@ -567,11 +587,11 @@ ASSERT_IPL(SPL0);
* Re-initialize FPU. Floating point state is not valid.
*/
void
-fpextovrflt()
+fpextovrflt(void)
{
- register thread_t thread = current_thread();
- register pcb_t pcb;
- register struct i386_fpsave_state *ifps;
+ thread_t thread = current_thread();
+ pcb_t pcb;
+ struct i386_fpsave_state *ifps;
#if NCPUS == 1
@@ -616,9 +636,9 @@ fpextovrflt()
}
static int
-fphandleerr()
+fphandleerr(void)
{
- register thread_t thread = current_thread();
+ thread_t thread = current_thread();
/*
* Save the FPU context to the thread using it.
@@ -663,9 +683,9 @@ fphandleerr()
* FPU error. Called by exception handler.
*/
void
-fpexterrflt()
+fpexterrflt(void)
{
- register thread_t thread = current_thread();
+ thread_t thread = current_thread();
if (fphandleerr())
return;
@@ -688,9 +708,9 @@ fpexterrflt()
* FPU error. Called by AST.
*/
void
-fpastintr()
+fpastintr(void)
{
- register thread_t thread = current_thread();
+ thread_t thread = current_thread();
ASSERT_IPL(SPL0);
#if NCPUS == 1
@@ -751,11 +771,10 @@ ASSERT_IPL(SPL0);
* . otherwise, thread is running.
*/
void
-fp_save(thread)
- register thread_t thread;
+fp_save(thread_t thread)
{
- register pcb_t pcb = thread->pcb;
- register struct i386_fpsave_state *ifps = pcb->ims.ifps;
+ pcb_t pcb = thread->pcb;
+ struct i386_fpsave_state *ifps = pcb->ims.ifps;
if (ifps != 0 && !ifps->fp_valid) {
/* registers are in FPU */
@@ -773,11 +792,10 @@ fp_save(thread)
* Locking not needed; always called on the current thread.
*/
void
-fp_load(thread)
- register thread_t thread;
+fp_load(thread_t thread)
{
- register pcb_t pcb = thread->pcb;
- register struct i386_fpsave_state *ifps;
+ pcb_t pcb = thread->pcb;
+ struct i386_fpsave_state *ifps;
ASSERT_IPL(SPL0);
ifps = pcb->ims.ifps;
@@ -785,7 +803,7 @@ ASSERT_IPL(SPL0);
ifps = (struct i386_fpsave_state *) kmem_cache_alloc(&ifps_cache);
memset(ifps, 0, sizeof *ifps);
pcb->ims.ifps = ifps;
- fpinit();
+ fpinit(thread);
#if 1
/*
* I'm not sure this is needed. Does the fpu regenerate the interrupt in
@@ -828,7 +846,7 @@ ASSERT_IPL(SPL0);
* Locking not needed; always called on the current thread.
*/
void
-fp_state_alloc()
+fp_state_alloc(void)
{
pcb_t pcb = current_thread()->pcb;
struct i386_fpsave_state *ifps;
diff --git a/i386/i386/fpu.h b/i386/i386/fpu.h
index 21561875..caade5d4 100644
--- a/i386/i386/fpu.h
+++ b/i386/i386/fpu.h
@@ -87,7 +87,7 @@
#if NCPUS > 1
#define fpu_save_context(thread) \
{ \
- register struct i386_fpsave_state *ifps; \
+ struct i386_fpsave_state *ifps; \
ifps = (thread)->pcb->ims.ifps; \
if (ifps != 0 && !ifps->fp_valid) { \
/* registers are in FPU - save to memory */ \
@@ -125,5 +125,6 @@ extern void fpexterrflt(void);
extern void fpastintr(void);
extern void init_fpu(void);
extern void fpintr(int unit);
+extern void fpinherit(thread_t parent_thread, thread_t thread);
#endif /* _I386_FPU_H_ */
diff --git a/i386/i386/gdt.c b/i386/i386/gdt.c
index 5523fea3..c895eb3a 100644
--- a/i386/i386/gdt.c
+++ b/i386/i386/gdt.c
@@ -46,7 +46,7 @@ extern
struct real_descriptor gdt[GDTSZ];
void
-gdt_init()
+gdt_init(void)
{
/* Initialize the kernel code and data segment descriptors. */
fill_gdt_descriptor(KERNEL_CS,
diff --git a/i386/i386/hardclock.c b/i386/i386/hardclock.c
index 66150332..49ea82cd 100644
--- a/i386/i386/hardclock.c
+++ b/i386/i386/hardclock.c
@@ -47,10 +47,10 @@ extern char return_to_iret[];
void
hardclock(iunit, old_ipl, irq, ret_addr, regs)
- int iunit; /* 'unit' number */
- int old_ipl; /* old interrupt level */
- int irq; /* irq number */
- char * ret_addr; /* return address in interrupt handler */
+ int iunit; /* 'unit' number */
+ int old_ipl; /* old interrupt level */
+ int irq; /* irq number */
+ const char * ret_addr; /* return address in interrupt handler */
struct i386_interrupt_state *regs;
/* saved registers */
{
@@ -77,5 +77,5 @@ hardclock(iunit, old_ipl, irq, ret_addr, regs)
#ifdef LINUX_DEV
linux_timer_intr();
-#endif
+#endif /* LINUX_DEV */
}
diff --git a/i386/i386/hardclock.h b/i386/i386/hardclock.h
new file mode 100644
index 00000000..38c51ea6
--- /dev/null
+++ b/i386/i386/hardclock.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _I386_HARDCLOCK_H_
+#define _I386_HARDCLOCK_H_
+
+void hardclock(
+ int iunit,
+ int old_ipl,
+ int irq,
+ char *ret_addr,
+ struct i386_interrupt_state *regs);
+
+#endif /* _I386_HARDCLOCK_H_ */
diff --git a/i386/i386/idt.c b/i386/i386/idt.c
index 882764f4..d304ec3e 100644
--- a/i386/i386/idt.c
+++ b/i386/i386/idt.c
@@ -36,7 +36,7 @@ struct idt_init_entry
};
extern struct idt_init_entry idt_inittab[];
-void idt_init()
+void idt_init(void)
{
#ifdef MACH_PV_DESCRIPTORS
if (hyp_set_trap_table(kvtolin(idt_inittab)))
diff --git a/i386/i386/io_map.c b/i386/i386/io_map.c
index b095f224..2c2aa720 100644
--- a/i386/i386/io_map.c
+++ b/i386/i386/io_map.c
@@ -37,9 +37,9 @@ extern vm_offset_t kernel_virtual_start;
* Mach VM is running.
*/
vm_offset_t
-io_map(phys_addr, size)
- vm_offset_t phys_addr;
- vm_size_t size;
+io_map(
+ vm_offset_t phys_addr,
+ vm_size_t size)
{
vm_offset_t start;
@@ -58,3 +58,35 @@ io_map(phys_addr, size)
VM_PROT_READ|VM_PROT_WRITE);
return (start);
}
+
+/*
+ * Allocate and map memory for devices that may need to be mapped before
+ * Mach VM is running.
+ *
+ * This maps all the pages containing [PHYS_ADDR:PHYS_ADDR + SIZE].
+ * Contiguous requests to those pages will reuse the previously
+ * established mapping.
+ *
+ * Warning: this leaks memory maps for now, do not use it yet for something
+ * else than Mach shutdown.
+ */
+vm_offset_t
+io_map_cached(
+ vm_offset_t phys_addr,
+ vm_size_t size)
+{
+ static vm_offset_t base;
+ static vm_size_t length;
+ static vm_offset_t map;
+
+ if (! map
+ || (phys_addr < base)
+ || (base + length < phys_addr + size))
+ {
+ base = trunc_page(phys_addr);
+ length = round_page(phys_addr - base + size);
+ map = io_map(base, length);
+ }
+
+ return map + (phys_addr - base);
+}
diff --git a/i386/i386/io_perm.c b/i386/i386/io_perm.c
index 8bacb8d5..d5c71035 100644
--- a/i386/i386/io_perm.c
+++ b/i386/i386/io_perm.c
@@ -66,6 +66,7 @@
#include "io_perm.h"
#include "gdt.h"
+#include "pcb.h"
/* Our device emulation ops. See below, at the bottom of this file. */
static struct device_emulation_ops io_perm_device_emulation_ops;
@@ -174,7 +175,7 @@ io_bitmap_clear (unsigned char *iopb, io_port_t from, io_port_t to)
The function is exported. */
kern_return_t
-i386_io_perm_create (ipc_port_t master_port, io_port_t from, io_port_t to,
+i386_io_perm_create (const ipc_port_t master_port, io_port_t from, io_port_t to,
io_perm_t *new)
{
if (master_port != master_device_port)
@@ -219,13 +220,8 @@ i386_io_perm_create (ipc_port_t master_port, io_port_t from, io_port_t to,
return KERN_SUCCESS;
}
-
-/* From pcb.c. */
-extern void update_ktss_iopb (unsigned char *new_iopb, int last);
-
-
/* Modify the I/O permissions for TARGET_TASK. If ENABLE is TRUE, the
- permission to acces the I/O ports specified by IO_PERM is granted,
+ permission to access the I/O ports specified by IO_PERM is granted,
otherwise it is withdrawn.
The function returns KERN_INVALID_ARGUMENT if TARGET_TASK is not a valid
diff --git a/i386/i386/io_perm.h b/i386/i386/io_perm.h
index a68e1038..a7f1f6fe 100644
--- a/i386/i386/io_perm.h
+++ b/i386/i386/io_perm.h
@@ -62,4 +62,4 @@ extern ipc_port_t convert_io_perm_to_port (io_perm_t);
extern void io_perm_deallocate (io_perm_t);
#endif
-#endif
+#endif /* _I386_IO_PERM_H_ */
diff --git a/i386/i386/ipl.h b/i386/i386/ipl.h
index 8f729e1d..2da2e89f 100644
--- a/i386/i386/ipl.h
+++ b/i386/i386/ipl.h
@@ -49,6 +49,8 @@ OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#ifndef _I386_IPL_H_
+#define _I386_IPL_H_
#define SPL0 0
#define SPL1 1
@@ -76,3 +78,5 @@ extern int intpri[];
extern spl_t curr_ipl;
#endif /* __ASSEMBLER__ */
#endif /* KERNEL */
+
+#endif /* _I386_IPL_H_ */
diff --git a/i386/i386/ktss.c b/i386/i386/ktss.c
index 1b2938a0..21d00300 100644
--- a/i386/i386/ktss.c
+++ b/i386/i386/ktss.c
@@ -40,7 +40,7 @@
struct task_tss ktss;
void
-ktss_init()
+ktss_init(void)
{
/* XXX temporary exception stack */
static int exception_stack[1024];
diff --git a/i386/i386/kttd_interface.c b/i386/i386/kttd_interface.c
index b9e0624b..c6caa76d 100644
--- a/i386/i386/kttd_interface.c
+++ b/i386/i386/kttd_interface.c
@@ -240,7 +240,7 @@ boolean_t kttd_mem_access(vm_offset_t offset, vm_prot_t access)
trunc_page(offset), access);
code = vm_fault(kernel_map, trunc_page(offset), access, FALSE,
FALSE, (void (*)()) 0);
- }else{
+ } else {
/*
* Check for user thread
*/
diff --git a/i386/i386/ldt.c b/i386/i386/ldt.c
index 43b9efb5..0250ee26 100644
--- a/i386/i386/ldt.c
+++ b/i386/i386/ldt.c
@@ -36,8 +36,7 @@
#include "seg.h"
#include "gdt.h"
#include "ldt.h"
-
-extern int syscall();
+#include "locore.h"
#ifdef MACH_PV_DESCRIPTORS
/* It is actually defined in xen_boothdr.S */
@@ -46,7 +45,7 @@ extern
struct real_descriptor ldt[LDTSZ];
void
-ldt_init()
+ldt_init(void)
{
#ifdef MACH_PV_DESCRIPTORS
#ifdef MACH_PV_PAGETABLES
diff --git a/i386/i386/lock.h b/i386/i386/lock.h
index 38a66c87..b989927b 100644
--- a/i386/i386/lock.h
+++ b/i386/i386/lock.h
@@ -44,7 +44,7 @@
*/
#define _simple_lock_xchg_(lock, new_val) \
- ({ register int _old_val_; \
+ ({ int _old_val_; \
asm volatile("xchgl %0, %2" \
: "=r" (_old_val_) \
: "0" (new_val), "m" (*(lock) : "memory") \
diff --git a/i386/i386/locore.S b/i386/i386/locore.S
index e1befa71..c715d959 100644
--- a/i386/i386/locore.S
+++ b/i386/i386/locore.S
@@ -119,6 +119,7 @@ LEXT(retry_table_end) ;\
* Uses %eax, %ebx, %ecx.
*/
#define TIME_TRAP_UENTRY \
+ pushf /* Save flags */ ;\
cli /* block interrupts */ ;\
movl VA_ETC,%ebx /* get timer value */ ;\
movl CX(EXT(current_tstamp),%edx),%ecx /* get old time stamp */;\
@@ -131,7 +132,7 @@ LEXT(retry_table_end) ;\
0: addl $(TH_SYSTEM_TIMER-TH_USER_TIMER),%ecx ;\
/* switch to sys timer */;\
movl %ecx,CX(EXT(current_timer),%edx) /* make it current */ ;\
- sti /* allow interrupts */
+ popf /* allow interrupts */
/*
* Update time on system call entry.
@@ -141,6 +142,7 @@ LEXT(retry_table_end) ;\
* Same as TIME_TRAP_UENTRY, but preserves %eax.
*/
#define TIME_TRAP_SENTRY \
+ pushf /* Save flags */ ;\
cli /* block interrupts */ ;\
movl VA_ETC,%ebx /* get timer value */ ;\
movl CX(EXT(current_tstamp),%edx),%ecx /* get old time stamp */;\
@@ -155,7 +157,7 @@ LEXT(retry_table_end) ;\
0: addl $(TH_SYSTEM_TIMER-TH_USER_TIMER),%ecx ;\
/* switch to sys timer */;\
movl %ecx,CX(EXT(current_timer),%edx) /* make it current */ ;\
- sti /* allow interrupts */
+ popf /* allow interrupts */
/*
* update time on user trap exit.
@@ -540,8 +542,10 @@ trap_from_kernel:
#if MACH_KDB || MACH_TTD
movl %esp,%ebx /* save current stack */
- cmpl EXT(int_stack_high),%esp /* on an interrupt stack? */
- jb 1f /* OK if so */
+ movl %esp,%edx /* on an interrupt stack? */
+ and $(~(KERNEL_STACK_SIZE-1)),%edx
+ cmpl EXT(int_stack_base),%edx
+ je 1f /* OK if so */
CPU_NUMBER(%edx) /* get CPU number */
cmpl CX(EXT(kernel_stack),%edx),%esp
@@ -645,8 +649,10 @@ ENTRY(all_intrs)
pushl %edx
cld /* clear direction flag */
- cmpl %ss:EXT(int_stack_high),%esp /* on an interrupt stack? */
- jb int_from_intstack /* if not: */
+ movl %esp,%edx /* on an interrupt stack? */
+ and $(~(KERNEL_STACK_SIZE-1)),%edx
+ cmpl %ss:EXT(int_stack_base),%edx
+ je int_from_intstack /* if not: */
pushl %ds /* save segment registers */
pushl %es
@@ -705,7 +711,7 @@ LEXT(return_to_iret) /* ( label for kdb_kintr and hardclock) */
iret /* return to caller */
int_from_intstack:
- cmpl $EXT(_intstack),%esp /* seemingly looping? */
+ cmpl EXT(int_stack_base),%esp /* seemingly looping? */
jb stack_overflowed /* if not: */
call EXT(interrupt) /* call interrupt routine */
_return_to_iret_i: /* ( label for kdb_kintr) */
@@ -1181,6 +1187,8 @@ ENTRY(discover_x86_cpu_type)
movl %esp,%ebp /* Save stack pointer */
and $~0x3,%esp /* Align stack pointer */
+#if 0
+/* Seems to hang with kvm linux 4.3.0 */
#ifdef MACH_HYP
#warning Assuming not Cyrix CPU
#else /* MACH_HYP */
@@ -1188,6 +1196,7 @@ ENTRY(discover_x86_cpu_type)
andb $0x80,%al /* ... in CCR4 reg bit7 */
outb %al,$0xe8
#endif /* MACH_HYP */
+#endif
pushfl /* Fetch flags ... */
popl %eax /* ... into eax */
@@ -1230,13 +1239,12 @@ ENTRY(discover_x86_cpu_type)
*/
/*
- * Copy from user address space.
+ * Copy from user address space - generic version.
* arg0: user address
* arg1: kernel address
* arg2: byte count
*/
ENTRY(copyin)
-Entry(copyinmsg)
pushl %esi
pushl %edi /* save registers */
@@ -1273,13 +1281,48 @@ copyin_fail:
jmp copyin_ret /* pop frame and return */
/*
- * Copy to user address space.
+ * Copy from user address space - version for copying messages.
+ * arg0: user address
+ * arg1: kernel address
+ * arg2: byte count - must be a multiple of four
+ */
+ENTRY(copyinmsg)
+ pushl %esi
+ pushl %edi /* save registers */
+
+ movl 8+S_ARG0,%esi /* get user start address */
+ movl 8+S_ARG1,%edi /* get kernel destination address */
+ movl 8+S_ARG2,%ecx /* get count */
+
+ movl $USER_DS,%eax /* use user data segment for accesses */
+ mov %ax,%ds
+
+ /*cld*/ /* count up: default mode in all GCC code */
+ shrl $2,%ecx
+ RECOVER(copyinmsg_fail)
+ rep
+ movsl /* move longwords */
+ xorl %eax,%eax /* return 0 for success */
+
+copyinmsg_ret:
+ mov %ss,%di /* restore DS to kernel segment */
+ mov %di,%ds
+
+ popl %edi /* restore registers */
+ popl %esi
+ ret /* and return */
+
+copyinmsg_fail:
+ movl $1,%eax /* return 1 for failure */
+ jmp copyinmsg_ret /* pop frame and return */
+
+/*
+ * Copy to user address space - generic version.
* arg0: kernel address
* arg1: user address
* arg2: byte count
*/
ENTRY(copyout)
-Entry(copyoutmsg)
pushl %esi
pushl %edi /* save registers */
@@ -1295,14 +1338,13 @@ Entry(copyoutmsg)
jbe copyout_retry /* Use slow version on i386 */
#endif /* !defined(MACH_HYP) && !PAE */
- movl %edx,%eax /* use count */
/*cld*/ /* count up: always this way in GCC code */
- movl %eax,%ecx /* move by longwords first */
+ movl %edx,%ecx /* move by longwords first */
shrl $2,%ecx
RECOVER(copyout_fail)
rep
movsl
- movl %eax,%ecx /* now move remaining bytes */
+ movl %edx,%ecx /* now move remaining bytes */
andl $3,%ecx
RECOVER(copyout_fail)
rep
@@ -1321,6 +1363,47 @@ copyout_fail:
movl $1,%eax /* return 1 for failure */
jmp copyout_ret /* pop frame and return */
+/*
+ * Copy to user address space - version for copying messages.
+ * arg0: kernel address
+ * arg1: user address
+ * arg2: byte count - must be a multiple of four
+ */
+ENTRY(copyoutmsg)
+ pushl %esi
+ pushl %edi /* save registers */
+
+ movl 8+S_ARG0,%esi /* get kernel start address */
+ movl 8+S_ARG1,%edi /* get user start address */
+ movl 8+S_ARG2,%ecx /* get count */
+
+ movl $USER_DS,%eax /* use user data segment for accesses */
+ mov %ax,%es
+
+#if !defined(MACH_HYP) && !PAE
+ movl 8+S_ARG2,%edx /* copyout_retry expects count here */
+ cmpl $3,machine_slot+SUB_TYPE_CPU_TYPE
+ jbe copyout_retry /* Use slow version on i386 */
+#endif /* !defined(MACH_HYP) && !PAE */
+
+ shrl $2,%ecx /* move by longwords */
+ RECOVER(copyoutmsg_fail)
+ rep
+ movsl
+ xorl %eax,%eax /* return 0 for success */
+
+copyoutmsg_ret:
+ mov %ss,%di /* restore ES to kernel segment */
+ mov %di,%es
+
+ popl %edi /* restore registers */
+ popl %esi
+ ret /* and return */
+
+copyoutmsg_fail:
+ movl $1,%eax /* return 1 for failure */
+ jmp copyoutmsg_ret /* pop frame and return */
+
#if !defined(MACH_HYP) && !PAE
/*
* Check whether user address space is writable
diff --git a/i386/i386/locore.h b/i386/i386/locore.h
index bfd13177..6948f72d 100644
--- a/i386/i386/locore.h
+++ b/i386/i386/locore.h
@@ -56,6 +56,8 @@ extern int inst_fetch (int eip, int cs);
extern void cpu_shutdown (void);
+extern int syscall (void);
+
extern unsigned int cpu_features[1];
#define CPU_FEATURE_FPU 0
diff --git a/i386/i386/loose_ends.c b/i386/i386/loose_ends.c
index d3108fdb..64b53b71 100644
--- a/i386/i386/loose_ends.c
+++ b/i386/i386/loose_ends.c
@@ -30,20 +30,18 @@
#define MACH_ASSERT 1
#else
#define MACH_ASSERT 0
-#endif
+#endif /* NDEBUG */
/*
* For now we will always go to single user mode, since there is
* no way pass this request through the boot.
*/
-int boothowto = 0;
/* Someone with time should write code to set cpuspeed automagically */
int cpuspeed = 4;
#define DELAY(n) { volatile int N = cpuspeed * (n); while (--N > 0); }
void
-delay(n)
- int n;
+delay(int n)
{
DELAY(n);
}
@@ -55,8 +53,8 @@ delay(n)
* levels of return pc information.
*/
void machine_callstack(
- unsigned long *buf,
- int callstack_max)
+ const unsigned long *buf,
+ int callstack_max)
{
}
diff --git a/i386/i386/machine_routines.h b/i386/i386/machine_routines.h
index a1fb489e..d9dd94be 100644
--- a/i386/i386/machine_routines.h
+++ b/i386/i386/machine_routines.h
@@ -31,7 +31,8 @@
* The i386 has a set of machine-dependent interfaces.
*/
#define MACHINE_SERVER mach_i386_server
+#define MACHINE_SERVER_HEADER "i386/i386/mach_i386.server.h"
#define MACHINE_SERVER_ROUTINE mach_i386_server_routine
-#endif
+#endif /* _I386_MACHINE_ROUTINES_H_ */
diff --git a/i386/i386/machine_task.c b/i386/i386/machine_task.c
index 62b22e3a..d592838a 100644
--- a/i386/i386/machine_task.c
+++ b/i386/i386/machine_task.c
@@ -38,7 +38,7 @@ void
machine_task_module_init (void)
{
kmem_cache_init (&machine_task_iopb_cache, "i386_task_iopb", IOPB_BYTES, 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
}
@@ -55,7 +55,7 @@ machine_task_init (task_t task)
/* Destroy the machine specific part of task TASK and release all
associated resources. */
void
-machine_task_terminate (task_t task)
+machine_task_terminate (const task_t task)
{
if (task->machine.iopb)
kmem_cache_free (&machine_task_iopb_cache,
diff --git a/i386/i386/model_dep.h b/i386/i386/model_dep.h
index a41c474d..ab2738f8 100644
--- a/i386/i386/model_dep.h
+++ b/i386/i386/model_dep.h
@@ -36,6 +36,10 @@ extern void machine_init (void);
/* Conserve power on processor CPU. */
extern void machine_idle (int cpu);
+extern void resettodr (void);
+
+extern void startrtclock (void);
+
/*
* Halt a cpu.
*/
@@ -46,10 +50,6 @@ extern void halt_cpu (void) __attribute__ ((noreturn));
*/
extern void halt_all_cpus (boolean_t reboot) __attribute__ ((noreturn));
-extern void resettodr (void);
-
-extern void startrtclock (void);
-
/*
* More-specific code provides these;
* they indicate the total extent of physical memory
diff --git a/i386/i386/mp_desc.c b/i386/i386/mp_desc.c
index 95f55af2..6aa8e664 100644
--- a/i386/i386/mp_desc.c
+++ b/i386/i386/mp_desc.c
@@ -65,12 +65,6 @@ char intstack[]; /* bottom */
char eintstack[]; /* top */
/*
- * We allocate interrupt stacks from physical memory.
- */
-extern
-vm_offset_t avail_start;
-
-/*
* Multiprocessor i386/i486 systems use a separate copy of the
* GDT, IDT, LDT, and kernel TSS per processor. The first three
* are separate to avoid lock contention: the i386 uses locked
@@ -106,10 +100,9 @@ extern struct real_descriptor ldt[LDTSZ];
*/
struct mp_desc_table *
-mp_desc_init(mycpu)
- register int mycpu;
+mp_desc_init(int mycpu)
{
- register struct mp_desc_table *mpt;
+ struct mp_desc_table *mpt;
if (mycpu == master_cpu) {
/*
@@ -177,9 +170,9 @@ mp_desc_init(mycpu)
* is running. The machine array must show which CPUs exist.
*/
void
-interrupt_stack_alloc()
+interrupt_stack_alloc(void)
{
- register int i;
+ int i;
int cpu_count;
vm_offset_t stack_start;
@@ -244,7 +237,7 @@ simple_lock_pause(void)
}
kern_return_t
-cpu_control(int cpu, int *info, unsigned int count)
+cpu_control(int cpu, const int *info, unsigned int count)
{
printf("cpu_control(%d, %p, %d) not implemented\n",
cpu, info, count);
diff --git a/i386/i386/mp_desc.h b/i386/i386/mp_desc.h
index 03d7194d..9f963123 100644
--- a/i386/i386/mp_desc.h
+++ b/i386/i386/mp_desc.h
@@ -79,4 +79,6 @@ extern struct mp_desc_table * mp_desc_init(int);
#endif /* MULTIPROCESSOR */
+extern void start_other_cpus(void);
+
#endif /* _I386_MP_DESC_H_ */
diff --git a/i386/i386/pcb.c b/i386/i386/pcb.c
index 97ec00e6..dd2042d8 100644
--- a/i386/i386/pcb.c
+++ b/i386/i386/pcb.c
@@ -60,12 +60,6 @@
#include <i386/mp_desc.h>
#endif
-extern thread_t Load_context();
-extern thread_t Switch_context();
-extern void Thread_continue();
-
-extern void user_ldt_free();
-
struct kmem_cache pcb_cache;
vm_offset_t kernel_stack[NCPUS]; /* top of active_stack */
@@ -76,10 +70,10 @@ vm_offset_t kernel_stack[NCPUS]; /* top of active_stack */
* Attach a kernel stack to a thread.
*/
-void stack_attach(thread, stack, continuation)
- register thread_t thread;
- register vm_offset_t stack;
- void (*continuation)(thread_t);
+void stack_attach(
+ thread_t thread,
+ vm_offset_t stack,
+ void (*continuation)(thread_t))
{
counter(if (++c_stacks_current > c_stacks_max)
c_stacks_max = c_stacks_current);
@@ -102,7 +96,7 @@ void stack_attach(thread, stack, continuation)
/*
* Point top of kernel stack to user`s registers.
*/
- STACK_IEL(stack)->saved_state = &thread->pcb->iss;
+ STACK_IEL(stack)->saved_state = USER_REGS(thread);
}
/*
@@ -111,10 +105,9 @@ void stack_attach(thread, stack, continuation)
* Detaches a kernel stack from a thread, returning the old stack.
*/
-vm_offset_t stack_detach(thread)
- register thread_t thread;
+vm_offset_t stack_detach(thread_t thread)
{
- register vm_offset_t stack;
+ vm_offset_t stack;
counter(if (--c_stacks_current < c_stacks_min)
c_stacks_min = c_stacks_current);
@@ -136,8 +129,7 @@ vm_offset_t stack_detach(thread)
#define gdt_desc_p(mycpu,sel) \
((struct real_descriptor *)&curr_gdt(mycpu)[sel_idx(sel)])
-void switch_ktss(pcb)
- register pcb_t pcb;
+void switch_ktss(pcb_t pcb)
{
int mycpu = cpu_number();
{
@@ -166,7 +158,7 @@ void switch_ktss(pcb)
}
{
- register user_ldt_t tldt = pcb->ims.ldt;
+ user_ldt_t tldt = pcb->ims.ldt;
/*
* Set the thread`s LDT.
*/
@@ -201,8 +193,14 @@ void switch_ktss(pcb)
for (i=0; i < USER_GDT_SLOTS; i++) {
if (memcmp(gdt_desc_p (mycpu, USER_GDT + (i << 3)),
&pcb->ims.user_gdt[i], sizeof pcb->ims.user_gdt[i])) {
+ union {
+ struct real_descriptor real_descriptor;
+ uint64_t descriptor;
+ } user_gdt;
+ user_gdt.real_descriptor = pcb->ims.user_gdt[i];
+
if (hyp_do_update_descriptor(kv_to_ma(gdt_desc_p (mycpu, USER_GDT + (i << 3))),
- *(uint64_t *) &pcb->ims.user_gdt[i]))
+ user_gdt.descriptor))
panic("couldn't set user gdt %d\n",i);
}
}
@@ -249,12 +247,12 @@ update_ktss_iopb (unsigned char *new_iopb, io_port_t size)
* Move the current thread's kernel stack to the new thread.
*/
-void stack_handoff(old, new)
- register thread_t old;
- register thread_t new;
+void stack_handoff(
+ thread_t old,
+ thread_t new)
{
- register int mycpu = cpu_number();
- register vm_offset_t stack;
+ int mycpu = cpu_number();
+ vm_offset_t stack;
/*
* Save FP registers if in use.
@@ -306,15 +304,14 @@ void stack_handoff(old, new)
* user registers.
*/
- STACK_IEL(stack)->saved_state = &new->pcb->iss;
+ STACK_IEL(stack)->saved_state = USER_REGS(new);
}
/*
* Switch to the first thread on a CPU.
*/
-void load_context(new)
- register thread_t new;
+void load_context(thread_t new)
{
switch_ktss(new->pcb);
Load_context(new);
@@ -325,10 +322,10 @@ void load_context(new)
* Save the old thread`s kernel state or continuation,
* and return it.
*/
-thread_t switch_context(old, continuation, new)
- register thread_t old;
- void (*continuation)();
- register thread_t new;
+thread_t switch_context(
+ thread_t old,
+ void (*continuation)(),
+ thread_t new)
{
/*
* Save FP registers if in use.
@@ -371,18 +368,17 @@ thread_t switch_context(old, continuation, new)
return Switch_context(old, continuation, new);
}
-void pcb_module_init()
+void pcb_module_init(void)
{
kmem_cache_init(&pcb_cache, "pcb", sizeof(struct pcb), 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
fpu_module_init();
}
-void pcb_init(thread)
- register thread_t thread;
+void pcb_init(task_t parent_task, thread_t thread)
{
- register pcb_t pcb;
+ pcb_t pcb;
pcb = (pcb_t) kmem_cache_alloc(&pcb_cache);
if (pcb == 0)
@@ -410,12 +406,16 @@ void pcb_init(thread)
pcb->iss.efl = EFL_USER_SET;
thread->pcb = pcb;
+
+ /* This is a new thread for the current task, make it inherit our FPU
+ state. */
+ if (parent_task == current_task())
+ fpinherit(current_thread(), thread);
}
-void pcb_terminate(thread)
- register thread_t thread;
+void pcb_terminate(thread_t thread)
{
- register pcb_t pcb = thread->pcb;
+ pcb_t pcb = thread->pcb;
counter(if (--c_threads_current < c_threads_min)
c_threads_min = c_threads_current);
@@ -435,7 +435,7 @@ void pcb_terminate(thread)
*/
void pcb_collect(thread)
- thread_t thread;
+ const thread_t thread;
{
}
@@ -446,18 +446,18 @@ void pcb_collect(thread)
* Set the status of the specified thread.
*/
-kern_return_t thread_setstatus(thread, flavor, tstate, count)
- thread_t thread;
- int flavor;
- thread_state_t tstate;
- unsigned int count;
+kern_return_t thread_setstatus(
+ thread_t thread,
+ int flavor,
+ thread_state_t tstate,
+ unsigned int count)
{
switch (flavor) {
case i386_THREAD_STATE:
case i386_REGS_SEGS_STATE:
{
- register struct i386_thread_state *state;
- register struct i386_saved_state *saved_state;
+ struct i386_thread_state *state;
+ struct i386_saved_state *saved_state;
if (count < i386_THREAD_STATE_COUNT) {
return(KERN_INVALID_ARGUMENT);
@@ -599,7 +599,7 @@ kern_return_t thread_setstatus(thread, flavor, tstate, count)
case i386_V86_ASSIST_STATE:
{
- register struct i386_v86_assist_state *state;
+ struct i386_v86_assist_state *state;
vm_offset_t int_table;
int int_count;
@@ -626,7 +626,7 @@ kern_return_t thread_setstatus(thread, flavor, tstate, count)
case i386_DEBUG_STATE:
{
- register struct i386_debug_state *state;
+ struct i386_debug_state *state;
kern_return_t ret;
if (count < i386_DEBUG_STATE_COUNT)
@@ -652,11 +652,11 @@ kern_return_t thread_setstatus(thread, flavor, tstate, count)
* Get the status of the specified thread.
*/
-kern_return_t thread_getstatus(thread, flavor, tstate, count)
- register thread_t thread;
- int flavor;
- thread_state_t tstate; /* pointer to OUT array */
- unsigned int *count; /* IN/OUT */
+kern_return_t thread_getstatus(
+ thread_t thread,
+ int flavor,
+ thread_state_t tstate, /* pointer to OUT array */
+ unsigned int *count) /* IN/OUT */
{
switch (flavor) {
case THREAD_STATE_FLAVOR_LIST:
@@ -672,8 +672,8 @@ kern_return_t thread_getstatus(thread, flavor, tstate, count)
case i386_THREAD_STATE:
case i386_REGS_SEGS_STATE:
{
- register struct i386_thread_state *state;
- register struct i386_saved_state *saved_state;
+ struct i386_thread_state *state;
+ struct i386_saved_state *saved_state;
if (*count < i386_THREAD_STATE_COUNT)
return(KERN_INVALID_ARGUMENT);
@@ -743,7 +743,7 @@ kern_return_t thread_getstatus(thread, flavor, tstate, count)
* Temporary - replace by i386_io_map
*/
case i386_ISA_PORT_MAP_STATE: {
- register struct i386_isa_port_map_state *state;
+ struct i386_isa_port_map_state *state;
if (*count < i386_ISA_PORT_MAP_STATE_COUNT)
return(KERN_INVALID_ARGUMENT);
@@ -754,8 +754,8 @@ kern_return_t thread_getstatus(thread, flavor, tstate, count)
if (thread->task->machine.iopb == 0)
memset (state->pm, 0xff, sizeof state->pm);
else
- memcpy((char *) state->pm,
- (char *) thread->task->machine.iopb,
+ memcpy(state->pm,
+ thread->task->machine.iopb,
sizeof state->pm);
simple_unlock (&thread->task->machine.iopb_lock);
@@ -765,7 +765,7 @@ kern_return_t thread_getstatus(thread, flavor, tstate, count)
case i386_V86_ASSIST_STATE:
{
- register struct i386_v86_assist_state *state;
+ struct i386_v86_assist_state *state;
if (*count < i386_V86_ASSIST_STATE_COUNT)
return KERN_INVALID_ARGUMENT;
@@ -780,7 +780,7 @@ kern_return_t thread_getstatus(thread, flavor, tstate, count)
case i386_DEBUG_STATE:
{
- register struct i386_debug_state *state;
+ struct i386_debug_state *state;
if (*count < i386_DEBUG_STATE_COUNT)
return KERN_INVALID_ARGUMENT;
@@ -804,24 +804,22 @@ kern_return_t thread_getstatus(thread, flavor, tstate, count)
* will make the thread return 'retval' from a syscall.
*/
void
-thread_set_syscall_return(thread, retval)
- thread_t thread;
- kern_return_t retval;
+thread_set_syscall_return(
+ thread_t thread,
+ kern_return_t retval)
{
thread->pcb->iss.eax = retval;
}
-
/*
- * Return prefered address of user stack.
+ * Return preferred address of user stack.
* Always returns low address. If stack grows up,
* the stack grows away from this address;
* if stack grows down, the stack grows towards this
* address.
*/
vm_offset_t
-user_stack_low(stack_size)
- vm_size_t stack_size;
+user_stack_low(vm_size_t stack_size)
{
return (VM_MAX_ADDRESS - stack_size);
}
@@ -833,11 +831,11 @@ vm_offset_t
set_user_regs(stack_base, stack_size, exec_info, arg_size)
vm_offset_t stack_base; /* low address */
vm_offset_t stack_size;
- struct exec_info *exec_info;
+ const struct exec_info *exec_info;
vm_size_t arg_size;
{
vm_offset_t arg_addr;
- register struct i386_saved_state *saved_state;
+ struct i386_saved_state *saved_state;
arg_size = (arg_size + sizeof(int) - 1) & ~(sizeof(int)-1);
arg_addr = stack_base + stack_size - arg_size;
diff --git a/i386/i386/pcb.h b/i386/i386/pcb.h
index 21bdfd9c..cf476942 100644
--- a/i386/i386/pcb.h
+++ b/i386/i386/pcb.h
@@ -28,8 +28,10 @@
#include <sys/types.h>
#include <mach/exec/exec.h>
+#include <mach/thread_status.h>
+#include <machine/thread.h>
-extern void pcb_init (thread_t thread);
+extern void pcb_init (task_t parent_task, thread_t thread);
extern void pcb_terminate (thread_t thread);
@@ -56,7 +58,7 @@ extern vm_offset_t user_stack_low (vm_size_t stack_size);
extern vm_offset_t set_user_regs (
vm_offset_t stack_base,
vm_offset_t stack_size,
- struct exec_info *exec_info,
+ const struct exec_info *exec_info,
vm_size_t arg_size);
extern void load_context (thread_t new);
@@ -70,4 +72,14 @@ extern vm_offset_t stack_detach (thread_t thread);
extern void switch_ktss (pcb_t pcb);
+extern void update_ktss_iopb (unsigned char *new_iopb, io_port_t size);
+
+extern thread_t Load_context (thread_t new);
+
+extern thread_t Switch_context (thread_t old, void (*continuation)(), thread_t new);
+
+extern void Thread_continue (void);
+
+extern void pcb_module_init (void);
+
#endif /* _I386_PCB_H_ */
diff --git a/i386/i386/phys.c b/i386/i386/phys.c
index ed4a309a..8681fba3 100644
--- a/i386/i386/phys.c
+++ b/i386/i386/phys.c
@@ -37,30 +37,75 @@
#include <vm/vm_page.h>
#include <i386/pmap.h>
+#include <i386/model_dep.h>
#include <mach/machine/vm_param.h>
+#define INTEL_PTE_W(p) (INTEL_PTE_VALID | INTEL_PTE_WRITE | INTEL_PTE_REF | INTEL_PTE_MOD | pa_to_pte(p))
+#define INTEL_PTE_R(p) (INTEL_PTE_VALID | INTEL_PTE_REF | pa_to_pte(p))
+
/*
* pmap_zero_page zeros the specified (machine independent) page.
*/
void
-pmap_zero_page(p)
- vm_offset_t p;
+pmap_zero_page(vm_offset_t p)
{
assert(p != vm_page_fictitious_addr);
- memset((void *)phystokv(p), 0, PAGE_SIZE);
+ vm_offset_t v;
+ pmap_mapwindow_t *map;
+ boolean_t mapped = p >= phys_last_addr;
+
+ if (mapped)
+ {
+ map = pmap_get_mapwindow(INTEL_PTE_W(p));
+ v = map->vaddr;
+ }
+ else
+ v = phystokv(p);
+
+ memset((void*) v, 0, PAGE_SIZE);
+
+ if (mapped)
+ pmap_put_mapwindow(map);
}
/*
* pmap_copy_page copies the specified (machine independent) pages.
*/
void
-pmap_copy_page(src, dst)
- vm_offset_t src, dst;
+pmap_copy_page(
+ vm_offset_t src,
+ vm_offset_t dst)
{
+ vm_offset_t src_addr_v, dst_addr_v;
+ pmap_mapwindow_t *src_map = NULL;
+ pmap_mapwindow_t *dst_map;
+ boolean_t src_mapped = src >= phys_last_addr;
+ boolean_t dst_mapped = dst >= phys_last_addr;
assert(src != vm_page_fictitious_addr);
assert(dst != vm_page_fictitious_addr);
- memcpy((void *)phystokv(dst), (void *)phystokv(src), PAGE_SIZE);
+ if (src_mapped)
+ {
+ src_map = pmap_get_mapwindow(INTEL_PTE_R(src));
+ src_addr_v = src_map->vaddr;
+ }
+ else
+ src_addr_v = phystokv(src);
+
+ if (dst_mapped)
+ {
+ dst_map = pmap_get_mapwindow(INTEL_PTE_W(dst));
+ dst_addr_v = dst_map->vaddr;
+ }
+ else
+ dst_addr_v = phystokv(dst);
+
+ memcpy((void *) dst_addr_v, (void *) src_addr_v, PAGE_SIZE);
+
+ if (src_mapped)
+ pmap_put_mapwindow(src_map);
+ if (dst_mapped)
+ pmap_put_mapwindow(dst_map);
}
/*
@@ -69,12 +114,29 @@ pmap_copy_page(src, dst)
* Copy virtual memory to physical memory
*/
void
-copy_to_phys(src_addr_v, dst_addr_p, count)
- vm_offset_t src_addr_v, dst_addr_p;
- int count;
+copy_to_phys(
+ vm_offset_t src_addr_v,
+ vm_offset_t dst_addr_p,
+ int count)
{
+ vm_offset_t dst_addr_v;
+ pmap_mapwindow_t *dst_map;
+ boolean_t mapped = dst_addr_p >= phys_last_addr;
assert(dst_addr_p != vm_page_fictitious_addr);
- memcpy((void *)phystokv(dst_addr_p), (void *)src_addr_v, count);
+ assert(pa_to_pte(dst_addr_p + count-1) == pa_to_pte(dst_addr_p));
+
+ if (mapped)
+ {
+ dst_map = pmap_get_mapwindow(INTEL_PTE_W(dst_addr_p));
+ dst_addr_v = dst_map->vaddr;
+ }
+ else
+ dst_addr_v = phystokv(dst_addr_p);
+
+ memcpy((void *)dst_addr_v, (void *)src_addr_v, count);
+
+ if (mapped)
+ pmap_put_mapwindow(dst_map);
}
/*
@@ -84,12 +146,29 @@ copy_to_phys(src_addr_v, dst_addr_p, count)
* is assumed to be present (e.g. the buffer pool).
*/
void
-copy_from_phys(src_addr_p, dst_addr_v, count)
- vm_offset_t src_addr_p, dst_addr_v;
- int count;
+copy_from_phys(
+ vm_offset_t src_addr_p,
+ vm_offset_t dst_addr_v,
+ int count)
{
+ vm_offset_t src_addr_v;
+ pmap_mapwindow_t *src_map;
+ boolean_t mapped = src_addr_p >= phys_last_addr;
assert(src_addr_p != vm_page_fictitious_addr);
- memcpy((void *)dst_addr_v, (void *)phystokv(src_addr_p), count);
+ assert(pa_to_pte(src_addr_p + count-1) == pa_to_pte(src_addr_p));
+
+ if (mapped)
+ {
+ src_map = pmap_get_mapwindow(INTEL_PTE_R(src_addr_p));
+ src_addr_v = src_map->vaddr;
+ }
+ else
+ src_addr_v = phystokv(src_addr_p);
+
+ memcpy((void *)dst_addr_v, (void *)src_addr_v, count);
+
+ if (mapped)
+ pmap_put_mapwindow(src_map);
}
/*
@@ -98,8 +177,7 @@ copy_from_phys(src_addr_p, dst_addr_v, count)
* Convert a kernel virtual address to a physical address
*/
vm_offset_t
-kvtophys(addr)
-vm_offset_t addr;
+kvtophys(vm_offset_t addr)
{
pt_entry_t *pte;
diff --git a/i386/i386/pic.c b/i386/i386/pic.c
index 50ca1509..e8c881af 100644
--- a/i386/i386/pic.c
+++ b/i386/i386/pic.c
@@ -62,9 +62,6 @@ int curr_pic_mask;
int iunit[NINTR] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
-int nintr = NINTR;
-int npics = NPICS;
-
unsigned short master_icw, master_ocw, slaves_icw, slaves_ocw;
u_short PICM_ICW1, PICM_OCW1, PICS_ICW1, PICS_OCW1 ;
@@ -105,7 +102,7 @@ u_short PICM_ICW4, PICS_ICW4 ;
*/
void
-picinit()
+picinit(void)
{
asm("cli");
@@ -223,7 +220,7 @@ picinit()
#define SLAVEACTV 0xFF00
void
-form_pic_mask()
+form_pic_mask(void)
{
unsigned int i, j, bit, mask;
@@ -240,7 +237,7 @@ form_pic_mask()
}
void
-intnull(unit_dev)
+intnull(int unit_dev)
{
printf("intnull(%d)\n", unit_dev);
}
@@ -248,7 +245,7 @@ intnull(unit_dev)
int prtnull_count = 0;
void
-prtnull(unit)
+prtnull(int unit)
{
++prtnull_count;
}
diff --git a/i386/i386/pic.h b/i386/i386/pic.h
index 52f6ec16..80bf65d6 100644
--- a/i386/i386/pic.h
+++ b/i386/i386/pic.h
@@ -183,6 +183,6 @@ extern int curr_pic_mask;
extern int pic_mask[];
extern void prtnull(int unit);
extern void intnull(int unit);
-#endif
+#endif /* __ASSEMBLER__ */
#endif /* _I386_PIC_H_ */
diff --git a/i386/i386/pit.c b/i386/i386/pit.c
index 4f156d87..da683308 100644
--- a/i386/i386/pit.c
+++ b/i386/i386/pit.c
@@ -57,8 +57,6 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
int pitctl_port = PITCTL_PORT; /* For 386/20 Board */
int pitctr0_port = PITCTR0_PORT; /* For 386/20 Board */
-int pitctr1_port = PITCTR1_PORT; /* For 386/20 Board */
-int pitctr2_port = PITCTR2_PORT; /* For 386/20 Board */
/* We want PIT 0 in square wave mode */
int pit0_mode = PIT_C0|PIT_SQUAREMODE|PIT_READMODE ;
@@ -67,7 +65,7 @@ int pit0_mode = PIT_C0|PIT_SQUAREMODE|PIT_READMODE ;
unsigned int clknumb = CLKNUM; /* interrupt interval for timer 0 */
void
-clkstart()
+clkstart(void)
{
unsigned char byte;
unsigned long s;
diff --git a/i386/i386/setjmp.h b/i386/i386/setjmp.h
index 667eecfa..930a9dd5 100644
--- a/i386/i386/setjmp.h
+++ b/i386/i386/setjmp.h
@@ -35,8 +35,6 @@ typedef struct jmp_buf {
extern int _setjmp(jmp_buf_t*);
-#ifdef __GNUC__
-extern __volatile__ void _longjmp(jmp_buf_t*, int);
-#endif
+extern void _longjmp(jmp_buf_t*, int) __attribute__ ((noreturn));
#endif /* _I386_SETJMP_H_ */
diff --git a/i386/i386/spl.S b/i386/i386/spl.S
index 3c075092..1dce991d 100644
--- a/i386/i386/spl.S
+++ b/i386/i386/spl.S
@@ -140,15 +140,35 @@ Entry(splsched)
Entry(splhigh)
Entry(splhi)
ENTRY(spl7)
- SETIPL(SPL7)
+ /* ipl7 just clears IF */
+ movl $SPL7,%eax
+ xchgl EXT(curr_ipl),%eax
+ cli
+ ret
ENTRY(splx)
movl S_ARG0,%edx /* get ipl */
+
+#if (MACH_KDB || MACH_TTD) && !defined(MACH_XEN)
+	/* First make sure that if we're exiting from ipl7, IF is still cleared */
+ cmpl $SPL7,EXT(curr_ipl) /* from ipl7? */
+ jne 0f
+ pushfl
+ popl %eax
+ testl $0x200,%eax /* IF? */
+ jz 0f
+ int3 /* Oops, interrupts got enabled?! */
+
+0:
+#endif /* (MACH_KDB || MACH_TTD) && !MACH_XEN */
testl %edx,%edx /* spl0? */
jz EXT(spl0) /* yes, handle specially */
cmpl EXT(curr_ipl),%edx /* same ipl as current? */
jne spl /* no */
+ cmpl $SPL7,%edx /* spl7? */
+ je 1f /* to ipl7, don't enable interrupts */
sti /* ensure interrupts are enabled */
+1:
movl %edx,%eax /* return previous ipl */
ret
@@ -207,6 +227,20 @@ splx_cli:
.align TEXT_ALIGN
.globl spl
spl:
+#if (MACH_KDB || MACH_TTD) && !defined(MACH_XEN)
+	/* First make sure that if we're exiting from ipl7, IF is still cleared */
+ cmpl $SPL7,EXT(curr_ipl) /* from ipl7? */
+ jne 0f
+ pushfl
+ popl %eax
+ testl $0x200,%eax /* IF? */
+ jz 0f
+ int3 /* Oops, interrupts got enabled?! */
+
+0:
+#endif /* (MACH_KDB || MACH_TTD) && !MACH_XEN */
+ cmpl $SPL7,%edx /* spl7? */
+ je EXT(spl7) /* yes, handle specially */
movl EXT(pic_mask)(,%edx,4),%eax
/* get PIC mask */
cli /* disable interrupts */
diff --git a/i386/i386/thread.h b/i386/i386/thread.h
index 450ec55f..9bda11f5 100644
--- a/i386/i386/thread.h
+++ b/i386/i386/thread.h
@@ -177,9 +177,10 @@ typedef struct pcb {
struct i386_saved_state iss;
struct i386_machine_state ims;
decl_simple_lock_data(, lock)
+ unsigned short init_control; /* Initial FPU control to set */
#ifdef LINUX_DEV
void *data;
-#endif
+#endif /* LINUX_DEV */
} *pcb_t;
/*
diff --git a/i386/i386/trap.c b/i386/i386/trap.c
index c6aab488..64705049 100644
--- a/i386/i386/trap.c
+++ b/i386/i386/trap.c
@@ -37,6 +37,7 @@
#include <i386/model_dep.h>
#include <intel/read_fault.h>
#include <machine/machspl.h> /* for spl_t */
+#include <machine/db_interface.h>
#include <mach/exception.h>
#include <mach/kern_return.h>
@@ -54,6 +55,7 @@
#include <kern/task.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
+#include <kern/exception.h>
#if MACH_KDB
#include <ddb/db_run.h>
@@ -62,21 +64,16 @@
#include "debug.h"
-extern void exception() __attribute__ ((noreturn));
-extern void thread_exception_return() __attribute__ ((noreturn));
-
-extern void i386_exception() __attribute__ ((noreturn));
-
#if MACH_KDB
boolean_t debug_all_traps_with_kdb = FALSE;
extern struct db_watchpoint *db_watchpoint_list;
extern boolean_t db_watchpoints_inserted;
void
-thread_kdb_return()
+thread_kdb_return(void)
{
- register thread_t thread = current_thread();
- register struct i386_saved_state *regs = USER_REGS(thread);
+ thread_t thread = current_thread();
+ struct i386_saved_state *regs = USER_REGS(thread);
if (kdb_trap(regs->trapno, regs->err, regs)) {
thread_exception_return();
@@ -91,11 +88,10 @@ boolean_t debug_all_traps_with_kttd = TRUE;
#endif /* MACH_TTD */
void
-user_page_fault_continue(kr)
- kern_return_t kr;
+user_page_fault_continue(kern_return_t kr)
{
- register thread_t thread = current_thread();
- register struct i386_saved_state *regs = USER_REGS(thread);
+ thread_t thread = current_thread();
+ struct i386_saved_state *regs = USER_REGS(thread);
if (kr == KERN_SUCCESS) {
#if MACH_KDB
@@ -150,23 +146,19 @@ char *trap_name(unsigned int trapnum)
return trapnum < TRAP_TYPES ? trap_type[trapnum] : "(unknown)";
}
-
-boolean_t brb = TRUE;
-
/*
* Trap from kernel mode. Only page-fault errors are recoverable,
* and then only in special circumstances. All other errors are
* fatal.
*/
-void kernel_trap(regs)
- register struct i386_saved_state *regs;
+void kernel_trap(struct i386_saved_state *regs)
{
- int code;
- int subcode;
- register int type;
+ int code;
+ int subcode;
+ int type;
vm_map_t map;
kern_return_t result;
- register thread_t thread;
+ thread_t thread;
extern char _start[], etext[];
type = regs->trapno;
@@ -225,9 +217,9 @@ dump_ss(regs);
goto badtrap;
}
} else {
- assert(thread);
- map = thread->task->map;
- if (map == kernel_map) {
+ if (thread)
+ map = thread->task->map;
+ if (!thread || map == kernel_map) {
printf("kernel page fault at %08x:\n", subcode);
dump_ss(regs);
panic("kernel thread accessed user space!\n");
@@ -275,7 +267,7 @@ dump_ss(regs);
* Certain faults require that we back up
* the EIP.
*/
- register struct recovery *rp;
+ struct recovery *rp;
/* Linear searching; but the list is small enough. */
for (rp = retry_table; rp < retry_table_end; rp++) {
@@ -292,7 +284,7 @@ dump_ss(regs);
* for this fault, go there.
*/
{
- register struct recovery *rp;
+ struct recovery *rp;
/* Linear searching; but the list is small enough. */
for (rp = recover_table;
@@ -351,14 +343,13 @@ dump_ss(regs);
* Trap from user mode.
* Return TRUE if from emulated system call.
*/
-int user_trap(regs)
- register struct i386_saved_state *regs;
+int user_trap(struct i386_saved_state *regs)
{
int exc = 0; /* Suppress gcc warning */
int code;
int subcode;
- register int type;
- register thread_t thread = current_thread();
+ int type;
+ thread_t thread = current_thread();
if ((vm_offset_t)thread < phys_last_addr) {
printf("user_trap: bad thread pointer 0x%p\n", thread);
@@ -403,7 +394,12 @@ printf("user trap %d error %d sub %08x\n", type, code, subcode);
if (kdb_trap(type, regs->err, regs))
return 0;
}
-#endif
+#endif /* MACH_KDB */
+ /* Make the content of the debug status register (DR6)
+ available to user space. */
+ if (thread->pcb)
+ thread->pcb->ims.ids.dr[6] = get_dr6() & 0x600F;
+ set_dr6(0);
exc = EXC_BREAKPOINT;
code = EXC_I386_SGL;
break;
@@ -425,7 +421,7 @@ printf("user trap %d error %d sub %08x\n", type, code, subcode);
return 0;
}
}
-#endif
+#endif /* MACH_KDB */
exc = EXC_BREAKPOINT;
code = EXC_I386_BPT;
break;
@@ -524,7 +520,7 @@ printf("user trap %d error %d sub %08x\n", type, code, subcode);
}
return 0;
}
-#endif
+#endif /* MACH_PV_PAGETABLES */
case T_FLOATING_POINT_ERROR:
fpexterrflt();
@@ -561,26 +557,15 @@ printf("user trap %d error %d sub %08x\n", type, code, subcode);
/*NOTREACHED*/
}
-/*
- * V86 mode assist for interrupt handling.
- */
-boolean_t v86_assist_on = TRUE;
-boolean_t v86_unsafe_ok = FALSE;
-boolean_t v86_do_sti_cli = TRUE;
-boolean_t v86_do_sti_immediate = FALSE;
-
#define V86_IRET_PENDING 0x4000
-int cli_count = 0;
-int sti_count = 0;
-
/*
* Handle AST traps for i386.
* Check for delayed floating-point exception from
* AT-bus machines.
*/
void
-i386_astintr()
+i386_astintr(void)
{
int mycpu = cpu_number();
@@ -589,7 +574,7 @@ i386_astintr()
if (need_ast[mycpu] & AST_I386_FP) {
/*
* AST was for delayed floating-point exception -
- * FP interrupt occured while in kernel.
+ * FP interrupt occurred while in kernel.
* Turn off this AST reason and handle the FPU error.
*/
ast_off(mycpu, AST_I386_FP);
@@ -619,10 +604,10 @@ i386_astintr()
* emulator.
*/
void
-i386_exception(exc, code, subcode)
- int exc;
- int code;
- int subcode;
+i386_exception(
+ int exc,
+ int code,
+ int subcode)
{
spl_t s;
@@ -643,11 +628,11 @@ i386_exception(exc, code, subcode)
*/
unsigned
interrupted_pc(t)
- thread_t t;
+ const thread_t t;
{
- register struct i386_saved_state *iss;
+ struct i386_saved_state *iss;
iss = USER_REGS(t);
return iss->eip;
}
-#endif /* MACH_PCSAMPLE > 0*/
+#endif /* MACH_PCSAMPLE > 0 */
diff --git a/i386/i386/trap.h b/i386/i386/trap.h
index b4e92246..46612db5 100644
--- a/i386/i386/trap.h
+++ b/i386/i386/trap.h
@@ -36,6 +36,15 @@ char *trap_name(unsigned int trapnum);
unsigned int interrupted_pc(thread_t);
+void
+i386_exception(
+ int exc,
+ int code,
+ int subcode) __attribute__ ((noreturn));
+
+extern void
+thread_kdb_return(void);
+
#endif /* !__ASSEMBLER__ */
#endif /* _I386_TRAP_H_ */
diff --git a/i386/i386/user_ldt.c b/i386/i386/user_ldt.c
index 74c10a4c..d8bdb90f 100644
--- a/i386/i386/user_ldt.c
+++ b/i386/i386/user_ldt.c
@@ -43,67 +43,17 @@
#include "ldt.h"
#include "vm_param.h"
-char acc_type[8][3] = {
- /* code stack data */
- { 0, 0, 1 }, /* data */
- { 0, 1, 1 }, /* data, writable */
- { 0, 0, 1 }, /* data, expand-down */
- { 0, 1, 1 }, /* data, writable, expand-down */
- { 1, 0, 0 }, /* code */
- { 1, 0, 1 }, /* code, readable */
- { 1, 0, 0 }, /* code, conforming */
- { 1, 0, 1 }, /* code, readable, conforming */
-};
-
-boolean_t selector_check(thread, sel, type)
- thread_t thread;
- int sel;
- int type; /* code, stack, data */
-{
- struct user_ldt *ldt;
- int access;
-
- ldt = thread->pcb->ims.ldt;
- if (ldt == 0) {
- switch (type) {
- case S_CODE:
- return sel == USER_CS;
- case S_STACK:
- return sel == USER_DS;
- case S_DATA:
- return sel == 0 ||
- sel == USER_CS ||
- sel == USER_DS;
- }
- }
-
- if (type != S_DATA && sel == 0)
- return FALSE;
- if ((sel & (SEL_LDT|SEL_PL)) != (SEL_LDT|SEL_PL_U)
- || sel > ldt->desc.limit_low)
- return FALSE;
-
- access = ldt->ldt[sel_idx(sel)].access;
-
- if ((access & (ACC_P|ACC_PL|ACC_TYPE_USER|SZ_64))
- != (ACC_P|ACC_PL_U|ACC_TYPE_USER))
- return FALSE;
- /* present, pl == pl.user, not system, not 64bits */
-
- return acc_type[(access & 0xe)>>1][type];
-}
-
/*
* Add the descriptors to the LDT, starting with
* the descriptor for 'first_selector'.
*/
kern_return_t
-i386_set_ldt(thread, first_selector, desc_list, count, desc_list_inline)
- thread_t thread;
- int first_selector;
- struct real_descriptor *desc_list;
- unsigned int count;
- boolean_t desc_list_inline;
+i386_set_ldt(
+ thread_t thread,
+ int first_selector,
+ struct real_descriptor *desc_list,
+ unsigned int count,
+ boolean_t desc_list_inline)
{
user_ldt_t new_ldt, old_ldt, temp;
struct real_descriptor *dp;
@@ -305,14 +255,14 @@ i386_set_ldt(thread, first_selector, desc_list, count, desc_list_inline)
kern_return_t
i386_get_ldt(thread, first_selector, selector_count, desc_list, count)
- thread_t thread;
+ const thread_t thread;
int first_selector;
int selector_count; /* number wanted */
struct real_descriptor **desc_list; /* in/out */
unsigned int *count; /* in/out */
{
struct user_ldt *user_ldt;
- pcb_t pcb = thread->pcb;
+ pcb_t pcb;
int first_desc = sel_idx(first_selector);
unsigned int ldt_count;
vm_size_t ldt_size;
@@ -326,6 +276,7 @@ i386_get_ldt(thread, first_selector, selector_count, desc_list, count)
if (first_desc + selector_count >= 8192)
return KERN_INVALID_ARGUMENT;
+ pcb = thread->pcb;
addr = 0;
size = 0;
@@ -416,8 +367,7 @@ i386_get_ldt(thread, first_selector, selector_count, desc_list, count)
}
void
-user_ldt_free(user_ldt)
- user_ldt_t user_ldt;
+user_ldt_free(user_ldt_t user_ldt)
{
#ifdef MACH_PV_DESCRIPTORS
int i;
@@ -481,7 +431,7 @@ i386_set_gdt (thread_t thread, int *selector, struct real_descriptor desc)
}
kern_return_t
-i386_get_gdt (thread_t thread, int selector, struct real_descriptor *desc)
+i386_get_gdt (const thread_t thread, int selector, struct real_descriptor *desc)
{
if (thread == THREAD_NULL)
return KERN_INVALID_ARGUMENT;
diff --git a/i386/i386/user_ldt.h b/i386/i386/user_ldt.h
index 6c6c858e..26caa274 100644
--- a/i386/i386/user_ldt.h
+++ b/i386/i386/user_ldt.h
@@ -44,15 +44,7 @@ struct user_ldt {
};
typedef struct user_ldt * user_ldt_t;
-/*
- * Check code/stack/data selector values against LDT if present.
- */
-#define S_CODE 0 /* code segment */
-#define S_STACK 1 /* stack segment */
-#define S_DATA 2 /* data segment */
-
-extern boolean_t selector_check(thread_t thread,
- int sel,
- int type);
+extern void
+user_ldt_free(user_ldt_t user_ldt);
#endif /* _I386_USER_LDT_H_ */
diff --git a/i386/i386/vm_param.h b/i386/i386/vm_param.h
index ffd91d65..2635c2cd 100644
--- a/i386/i386/vm_param.h
+++ b/i386/i386/vm_param.h
@@ -23,6 +23,8 @@
#ifndef _I386_KERNEL_I386_VM_PARAM_
#define _I386_KERNEL_I386_VM_PARAM_
+#include <kern/macros.h>
+
/* XXX use xu/vm_param.h */
#include <mach/vm_param.h>
#ifdef MACH_PV_PAGETABLES
@@ -56,9 +58,9 @@
/* Reserve mapping room for kmem. */
#ifdef MACH_XEN
-#define VM_KERNEL_MAP_SIZE (224 * 1024 * 1024)
+#define VM_KERNEL_MAP_SIZE (128 * 1024 * 1024)
#else
-#define VM_KERNEL_MAP_SIZE (192 * 1024 * 1024)
+#define VM_KERNEL_MAP_SIZE (96 * 1024 * 1024)
#endif
/* The kernel virtual address space is actually located
@@ -101,4 +103,51 @@
#define kvtolin(a) ((vm_offset_t)(a) - VM_MIN_KERNEL_ADDRESS + LINEAR_MIN_KERNEL_ADDRESS)
#define lintokv(a) ((vm_offset_t)(a) - LINEAR_MIN_KERNEL_ADDRESS + VM_MIN_KERNEL_ADDRESS)
+/*
+ * Physical memory properties.
+ */
+#define VM_PAGE_DMA_LIMIT DECL_CONST(0x1000000, UL)
+
+#ifdef MACH_XEN
+/* TODO Completely check Xen physical/virtual layout */
+#define VM_PAGE_MAX_SEGS 3
+#define VM_PAGE_DIRECTMAP_LIMIT (VM_MAX_KERNEL_ADDRESS \
+ - VM_MIN_KERNEL_ADDRESS \
+ - VM_KERNEL_MAP_SIZE)
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0x10000000000000, ULL)
+#else /* MACH_XEN */
+#ifdef __LP64__
+#define VM_PAGE_MAX_SEGS 4
+#define VM_PAGE_DMA32_LIMIT DECL_CONST(0x100000000, UL)
+#define VM_PAGE_DIRECTMAP_LIMIT DECL_CONST(0x400000000000, UL)
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0x10000000000000, UL)
+#else /* __LP64__ */
+#define VM_PAGE_DIRECTMAP_LIMIT (VM_MAX_KERNEL_ADDRESS \
+ - VM_MIN_KERNEL_ADDRESS \
+ - VM_KERNEL_MAP_SIZE + 1)
+#ifdef PAE
+#define VM_PAGE_MAX_SEGS 3
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0x10000000000000, ULL)
+#else /* PAE */
+#define VM_PAGE_MAX_SEGS 3
+#define VM_PAGE_HIGHMEM_LIMIT DECL_CONST(0xfffff000, UL)
+#endif /* PAE */
+#endif /* __LP64__ */
+#endif /* MACH_XEN */
+
+/*
+ * Physical segment indexes.
+ */
+#define VM_PAGE_SEG_DMA 0
+
+#ifdef __LP64__
+#define VM_PAGE_SEG_DMA32 1
+#define VM_PAGE_SEG_DIRECTMAP 2
+#define VM_PAGE_SEG_HIGHMEM 3
+#else /* __LP64__ */
+#define VM_PAGE_SEG_DMA32 1 /* Alias for the DIRECTMAP segment */
+#define VM_PAGE_SEG_DIRECTMAP 1
+#define VM_PAGE_SEG_HIGHMEM 2
+#endif /* __LP64__ */
+
#endif /* _I386_KERNEL_I386_VM_PARAM_ */
diff --git a/i386/i386/vm_tuning.h b/i386/i386/vm_tuning.h
deleted file mode 100644
index f54e110a..00000000
--- a/i386/i386/vm_tuning.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Mach Operating System
- * Copyright (c) 1991,1990 Carnegie Mellon University
- * All Rights Reserved.
- *
- * Permission to use, copy, modify and distribute this software and its
- * documentation is hereby granted, provided that both the copyright
- * notice and this permission notice appear in all copies of the
- * software, derivative works or modified versions, and any portions
- * thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
- * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
- * Carnegie Mellon requests users of this software to return to
- *
- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
- * School of Computer Science
- * Carnegie Mellon University
- * Pittsburgh PA 15213-3890
- *
- * any improvements or extensions that they make and grant Carnegie Mellon
- * the rights to redistribute these changes.
- */
-/*
- * File: i386/vm_tuning.h
- *
- * VM tuning parameters for the i386 (without reference bits).
- */
-
-#ifndef _I386_VM_TUNING_H_
-#define _I386_VM_TUNING_H_
-
-#endif /* _I386_VM_TUNING_H_ */
diff --git a/i386/i386/xen.h b/i386/i386/xen.h
index 5bdaf0b8..c6811873 100644
--- a/i386/i386/xen.h
+++ b/i386/i386/xen.h
@@ -21,6 +21,7 @@
#ifdef MACH_XEN
#ifndef __ASSEMBLER__
+#include <kern/macros.h>
#include <kern/printf.h>
#include <mach/machine/vm_types.h>
#include <mach/vm_param.h>
@@ -32,8 +33,7 @@
#include <xen/public/xen.h>
/* TODO: this should be moved in appropriate non-Xen place. */
-#define barrier() __asm__ __volatile__ ("": : :"memory")
-#define mb() __asm__ __volatile__("lock; addl $0,0(%esp)")
+#define mb() __asm__ __volatile__("lock; addl $0,0(%%esp)":::"memory")
#define rmb() mb()
#define wmb() mb()
MACH_INLINE unsigned long xchgl(volatile unsigned long *ptr, unsigned long x)
diff --git a/i386/i386at/acpi.c b/i386/i386at/acpi.c
new file mode 100644
index 00000000..ec8aeb1e
--- /dev/null
+++ b/i386/i386at/acpi.c
@@ -0,0 +1,82 @@
+/* acpi.c - get acpi tables. */
+/*
+ * GRUB -- GRand Unified Bootloader
+ * Copyright (C) 2009 Free Software Foundation, Inc.
+ *
+ * GRUB is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * GRUB is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GRUB. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <grub/glue.h>
+#include <grub/acpi.h>
+#include <grub/misc.h>
+
+struct grub_acpi_rsdp_v10 *
+grub_machine_acpi_get_rsdpv1 (void)
+{
+ int ebda_len;
+ grub_uint8_t *ebda, *ptr;
+
+ grub_dprintf ("acpi", "Looking for RSDP. Scanning EBDA\n");
+ ebda = (grub_uint8_t *) phystokv ((* ((grub_uint16_t *) phystokv (0x40e))) << 4);
+ ebda_len = * (grub_uint16_t *) ebda;
+ if (! ebda_len)
+ return 0;
+ for (ptr = ebda; ptr < ebda + 0x400; ptr += 16)
+ if (grub_memcmp (ptr, GRUB_RSDP_SIGNATURE, GRUB_RSDP_SIGNATURE_SIZE) == 0
+ && grub_byte_checksum (ptr, sizeof (struct grub_acpi_rsdp_v10)) == 0
+ && ((struct grub_acpi_rsdp_v10 *) ptr)->revision == 0)
+ return (struct grub_acpi_rsdp_v10 *) ptr;
+
+ grub_dprintf ("acpi", "Looking for RSDP. Scanning BIOS\n");
+ for (ptr = (grub_uint8_t *) phystokv (0xe0000); ptr < (grub_uint8_t *) phystokv (0x100000);
+ ptr += 16)
+ if (grub_memcmp (ptr, GRUB_RSDP_SIGNATURE, GRUB_RSDP_SIGNATURE_SIZE) == 0
+ && grub_byte_checksum (ptr, sizeof (struct grub_acpi_rsdp_v10)) == 0
+ && ((struct grub_acpi_rsdp_v10 *) ptr)->revision == 0)
+ return (struct grub_acpi_rsdp_v10 *) ptr;
+ return 0;
+}
+
+struct grub_acpi_rsdp_v20 *
+grub_machine_acpi_get_rsdpv2 (void)
+{
+ int ebda_len;
+ grub_uint8_t *ebda, *ptr;
+
+ grub_dprintf ("acpi", "Looking for RSDP. Scanning EBDA\n");
+ ebda = (grub_uint8_t *) phystokv ((* ((grub_uint16_t *) phystokv (0x40e))) << 4);
+ ebda_len = * (grub_uint16_t *) ebda;
+ if (! ebda_len)
+ return 0;
+ for (ptr = ebda; ptr < ebda + 0x400; ptr += 16)
+ if (grub_memcmp (ptr, GRUB_RSDP_SIGNATURE, GRUB_RSDP_SIGNATURE_SIZE) == 0
+ && grub_byte_checksum (ptr, sizeof (struct grub_acpi_rsdp_v10)) == 0
+ && ((struct grub_acpi_rsdp_v10 *) ptr)->revision != 0
+ && ((struct grub_acpi_rsdp_v20 *) ptr)->length < 1024
+ && grub_byte_checksum (ptr, ((struct grub_acpi_rsdp_v20 *) ptr)->length)
+ == 0)
+ return (struct grub_acpi_rsdp_v20 *) ptr;
+
+ grub_dprintf ("acpi", "Looking for RSDP. Scanning BIOS\n");
+ for (ptr = (grub_uint8_t *) phystokv (0xe0000); ptr < (grub_uint8_t *) phystokv (0x100000);
+ ptr += 16)
+ if (grub_memcmp (ptr, GRUB_RSDP_SIGNATURE, GRUB_RSDP_SIGNATURE_SIZE) == 0
+ && grub_byte_checksum (ptr, sizeof (struct grub_acpi_rsdp_v10)) == 0
+ && ((struct grub_acpi_rsdp_v10 *) ptr)->revision != 0
+ && ((struct grub_acpi_rsdp_v20 *) ptr)->length < 1024
+ && grub_byte_checksum (ptr, ((struct grub_acpi_rsdp_v20 *) ptr)->length)
+ == 0)
+ return (struct grub_acpi_rsdp_v20 *) ptr;
+ return 0;
+}
diff --git a/i386/i386at/acpihalt.c b/i386/i386at/acpihalt.c
new file mode 100644
index 00000000..23df44ff
--- /dev/null
+++ b/i386/i386at/acpihalt.c
@@ -0,0 +1,409 @@
+/*
+ * GRUB -- GRand Unified Bootloader
+ * Copyright (C) 2010 Free Software Foundation, Inc.
+ *
+ * GRUB is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * GRUB is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with GRUB. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <grub/glue.h>
+
+#ifdef GRUB_DSDT_TEST
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <errno.h>
+
+#define grub_dprintf(cond, args...) printf ( args )
+#define grub_printf printf
+typedef uint64_t grub_uint64_t;
+typedef uint32_t grub_uint32_t;
+typedef uint16_t grub_uint16_t;
+typedef uint8_t grub_uint8_t;
+
+#endif
+
+#include <grub/acpi.h>
+#ifndef GRUB_DSDT_TEST
+#include <grub/i18n.h>
+#else
+#define _(x) x
+#define N_(x) x
+#endif
+
+#ifndef GRUB_DSDT_TEST
+#include <grub/mm.h>
+#include <grub/misc.h>
+#include <grub/time.h>
+#include <grub/cpu/io.h>
+#endif
+
+static inline grub_uint32_t
+decode_length (const grub_uint8_t *ptr, int *numlen)
+{
+ int num_bytes, i;
+ grub_uint32_t ret;
+ if (*ptr < 64)
+ {
+ if (numlen)
+ *numlen = 1;
+ return *ptr;
+ }
+ num_bytes = *ptr >> 6;
+ if (numlen)
+ *numlen = num_bytes + 1;
+ ret = *ptr & 0xf;
+ ptr++;
+ for (i = 0; i < num_bytes; i++)
+ {
+ ret |= *ptr << (8 * i + 4);
+ ptr++;
+ }
+ return ret;
+}
+
+static inline grub_uint32_t
+skip_name_string (const grub_uint8_t *ptr, const grub_uint8_t *end)
+{
+ const grub_uint8_t *ptr0 = ptr;
+
+ while (ptr < end && (*ptr == '^' || *ptr == '\\'))
+ ptr++;
+ switch (*ptr)
+ {
+ case '.':
+ ptr++;
+ ptr += 8;
+ break;
+ case '/':
+ ptr++;
+ ptr += 1 + (*ptr) * 4;
+ break;
+ case 0:
+ ptr++;
+ break;
+ default:
+ ptr += 4;
+ break;
+ }
+ return ptr - ptr0;
+}
+
+static inline grub_uint32_t
+skip_data_ref_object (const grub_uint8_t *ptr, const grub_uint8_t *end)
+{
+ grub_dprintf ("acpi", "data type = 0x%x\n", *ptr);
+ switch (*ptr)
+ {
+ case GRUB_ACPI_OPCODE_PACKAGE:
+ case GRUB_ACPI_OPCODE_BUFFER:
+ return 1 + decode_length (ptr + 1, 0);
+ case GRUB_ACPI_OPCODE_ZERO:
+ case GRUB_ACPI_OPCODE_ONES:
+ case GRUB_ACPI_OPCODE_ONE:
+ return 1;
+ case GRUB_ACPI_OPCODE_BYTE_CONST:
+ return 2;
+ case GRUB_ACPI_OPCODE_WORD_CONST:
+ return 3;
+ case GRUB_ACPI_OPCODE_DWORD_CONST:
+ return 5;
+ case GRUB_ACPI_OPCODE_STRING_CONST:
+ {
+ const grub_uint8_t *ptr0 = ptr;
+ for (ptr++; ptr < end && *ptr; ptr++);
+ if (ptr == end)
+ return 0;
+ return ptr - ptr0 + 1;
+ }
+ default:
+ if (*ptr == '^' || *ptr == '\\' || *ptr == '_'
+ || (*ptr >= 'A' && *ptr <= 'Z'))
+ return skip_name_string (ptr, end);
+ grub_printf ("Unknown opcode 0x%x\n", *ptr);
+ return 0;
+ }
+}
+
+static inline grub_uint32_t
+skip_ext_op (const grub_uint8_t *ptr, const grub_uint8_t *end)
+{
+ const grub_uint8_t *ptr0 = ptr;
+ int add;
+ grub_dprintf ("acpi", "Extended opcode: 0x%x\n", *ptr);
+ switch (*ptr)
+ {
+ case GRUB_ACPI_EXTOPCODE_MUTEX:
+ ptr++;
+ ptr += skip_name_string (ptr, end);
+ ptr++;
+ break;
+ case GRUB_ACPI_EXTOPCODE_EVENT_OP:
+ ptr++;
+ ptr += skip_name_string (ptr, end);
+ break;
+ case GRUB_ACPI_EXTOPCODE_OPERATION_REGION:
+ ptr++;
+ ptr += skip_name_string (ptr, end);
+ ptr++;
+ ptr += add = skip_data_ref_object (ptr, end);
+ if (!add)
+ return 0;
+ ptr += add = skip_data_ref_object (ptr, end);
+ if (!add)
+ return 0;
+ break;
+ case GRUB_ACPI_EXTOPCODE_FIELD_OP:
+ case GRUB_ACPI_EXTOPCODE_DEVICE_OP:
+ case GRUB_ACPI_EXTOPCODE_PROCESSOR_OP:
+ case GRUB_ACPI_EXTOPCODE_POWER_RES_OP:
+ case GRUB_ACPI_EXTOPCODE_THERMAL_ZONE_OP:
+ case GRUB_ACPI_EXTOPCODE_INDEX_FIELD_OP:
+ case GRUB_ACPI_EXTOPCODE_BANK_FIELD_OP:
+ ptr++;
+ ptr += decode_length (ptr, 0);
+ break;
+ default:
+ grub_printf ("Unexpected extended opcode: 0x%x\n", *ptr);
+ return 0;
+ }
+ return ptr - ptr0;
+}
+
+static int
+get_sleep_type (grub_uint8_t *table, grub_uint8_t *ptr, grub_uint8_t *end,
+ grub_uint8_t *scope, int scope_len)
+{
+ grub_uint8_t *prev = table;
+
+ if (!ptr)
+ ptr = table + sizeof (struct grub_acpi_table_header);
+ while (ptr < end && prev < ptr)
+ {
+ int add;
+ prev = ptr;
+ grub_dprintf ("acpi", "Opcode 0x%x\n", *ptr);
+ grub_dprintf ("acpi", "Tell %x\n", (unsigned) (ptr - table));
+ switch (*ptr)
+ {
+ case GRUB_ACPI_OPCODE_EXTOP:
+ ptr++;
+ ptr += add = skip_ext_op (ptr, end);
+ if (!add)
+ return -1;
+ break;
+ case GRUB_ACPI_OPCODE_CREATE_WORD_FIELD:
+ case GRUB_ACPI_OPCODE_CREATE_BYTE_FIELD:
+ {
+ ptr += 5;
+ ptr += add = skip_data_ref_object (ptr, end);
+ if (!add)
+ return -1;
+ ptr += 4;
+ break;
+ }
+ case GRUB_ACPI_OPCODE_NAME:
+ ptr++;
+ if ((!scope || grub_memcmp (scope, "\\", scope_len) == 0) &&
+ (grub_memcmp (ptr, "_S5_", 4) == 0 || grub_memcmp (ptr, "\\_S5_", 4) == 0))
+ {
+ int ll;
+ grub_uint8_t *ptr2 = ptr;
+ grub_dprintf ("acpi", "S5 found\n");
+ ptr2 += skip_name_string (ptr, end);
+ if (*ptr2 != 0x12)
+ {
+ grub_printf ("Unknown opcode in _S5: 0x%x\n", *ptr2);
+ return -1;
+ }
+ ptr2++;
+ decode_length (ptr2, &ll);
+ ptr2 += ll;
+ ptr2++;
+ switch (*ptr2)
+ {
+ case GRUB_ACPI_OPCODE_ZERO:
+ return 0;
+ case GRUB_ACPI_OPCODE_ONE:
+ return 1;
+ case GRUB_ACPI_OPCODE_BYTE_CONST:
+ return ptr2[1];
+ default:
+ grub_printf ("Unknown data type in _S5: 0x%x\n", *ptr2);
+ return -1;
+ }
+ }
+ ptr += add = skip_name_string (ptr, end);
+ if (!add)
+ return -1;
+ ptr += add = skip_data_ref_object (ptr, end);
+ if (!add)
+ return -1;
+ break;
+ case GRUB_ACPI_OPCODE_SCOPE:
+ {
+ int scope_sleep_type;
+ int ll;
+ grub_uint8_t *name;
+ int name_len;
+
+ ptr++;
+ add = decode_length (ptr, &ll);
+ name = ptr + ll;
+ name_len = skip_name_string (name, ptr + add);
+ if (!name_len)
+ return -1;
+ scope_sleep_type = get_sleep_type (table, name + name_len,
+ ptr + add, name, name_len);
+ if (scope_sleep_type != -2)
+ return scope_sleep_type;
+ ptr += add;
+ break;
+ }
+ case GRUB_ACPI_OPCODE_IF:
+ case GRUB_ACPI_OPCODE_METHOD:
+ {
+ ptr++;
+ ptr += decode_length (ptr, 0);
+ break;
+ }
+ default:
+ grub_printf ("Unknown opcode 0x%x\n", *ptr);
+ return -1;
+ }
+ }
+
+ return -2;
+}
+
+#ifdef GRUB_DSDT_TEST
+int
+main (int argc, char **argv)
+{
+ FILE *f;
+ size_t len;
+ unsigned char *buf;
+ if (argc < 2)
+ printf ("Usage: %s FILE\n", argv[0]);
+ f = grub_util_fopen (argv[1], "rb");
+ if (!f)
+ {
+ printf ("Couldn't open file\n");
+ return 1;
+ }
+ fseek (f, 0, SEEK_END);
+ len = ftell (f);
+ fseek (f, 0, SEEK_SET);
+ buf = malloc (len);
+ if (!buf)
+ {
+ printf (_("error: %s.\n"), _("out of memory"));
+ fclose (f);
+ return 2;
+ }
+ if (fread (buf, 1, len, f) != len)
+ {
+ printf (_("cannot read `%s': %s"), argv[1], strerror (errno));
+ free (buf);
+ fclose (f);
+ return 2;
+ }
+
+ printf ("Sleep type = %d\n", get_sleep_type (buf, NULL, buf + len, NULL, 0));
+ free (buf);
+ fclose (f);
+ return 0;
+}
+
+#else
+
+void
+grub_acpi_halt (void)
+{
+ struct grub_acpi_rsdp_v20 *rsdp2;
+ struct grub_acpi_rsdp_v10 *rsdp1;
+ struct grub_acpi_table_header *rsdt;
+ grub_uint32_t *entry_ptr;
+ grub_uint32_t port = 0;
+ int sleep_type = -1;
+
+ rsdp2 = grub_acpi_get_rsdpv2 ();
+ if (rsdp2)
+ rsdp1 = &(rsdp2->rsdpv1);
+ else
+ rsdp1 = grub_acpi_get_rsdpv1 ();
+ grub_dprintf ("acpi", "rsdp1=%p\n", rsdp1);
+ if (!rsdp1)
+ return;
+
+ rsdt = (struct grub_acpi_table_header *)
+ io_map_cached (rsdp1->rsdt_addr, sizeof *rsdt);
+ rsdt = (struct grub_acpi_table_header *)
+ io_map_cached (rsdp1->rsdt_addr, rsdt->length);
+
+ for (entry_ptr = (grub_uint32_t *) (rsdt + 1);
+ entry_ptr < (grub_uint32_t *) (((grub_uint8_t *) rsdt)
+ + rsdt->length);
+ entry_ptr++)
+ {
+ if (grub_memcmp ((void *) io_map_cached (*entry_ptr, 4),
+ "FACP", 4) == 0)
+ {
+ struct grub_acpi_fadt *fadt = (struct grub_acpi_fadt *)
+ io_map_cached (*entry_ptr, sizeof *fadt);
+
+ struct grub_acpi_table_header *dsdt =
+ (struct grub_acpi_table_header *)
+ io_map_cached (fadt->dsdt_addr, sizeof *dsdt);
+ grub_uint8_t *buf = (grub_uint8_t *)
+ io_map_cached (fadt->dsdt_addr, dsdt->length);
+
+ port = fadt->pm1a;
+
+ grub_dprintf ("acpi", "PM1a port=%x\n", port);
+
+ if (grub_memcmp (dsdt->signature, "DSDT",
+ sizeof (dsdt->signature)) == 0
+ && sleep_type < 0)
+ sleep_type = get_sleep_type (buf, NULL, buf + dsdt->length,
+ NULL, 0);
+ }
+ else
+ if (grub_memcmp ((void *) io_map_cached (*entry_ptr, 4), "SSDT", 4) == 0
+ && sleep_type < 0)
+ {
+ struct grub_acpi_table_header *ssdt
+ = (struct grub_acpi_table_header *) (grub_addr_t)
+ io_map_cached (*entry_ptr, sizeof *ssdt);
+ grub_uint8_t *buf = (grub_uint8_t *)
+ io_map_cached (*entry_ptr, ssdt->length);
+
+ grub_dprintf ("acpi", "SSDT = %p\n", ssdt);
+
+ sleep_type = get_sleep_type (buf, NULL, buf + ssdt->length, NULL, 0);
+ }
+ }
+
+ grub_dprintf ("acpi", "SLP_TYP = %d, port = 0x%x\n", sleep_type, port);
+ if (port && sleep_type >= 0 && sleep_type < 8)
+ grub_outw (GRUB_ACPI_SLP_EN | (sleep_type << GRUB_ACPI_SLP_TYP_OFFSET),
+ port & 0xffff);
+
+ grub_millisleep (1500);
+
+ /* TRANSLATORS: It's computer shutdown using ACPI, not disabling ACPI. */
+ grub_puts_ (N_("ACPI shutdown failed"));
+}
+#endif
diff --git a/i386/i386at/acpihalt.h b/i386/i386at/acpihalt.h
new file mode 100644
index 00000000..a4fdb075
--- /dev/null
+++ b/i386/i386at/acpihalt.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2014 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ACPIHALT_H_
+#define _ACPIHALT_H_
+
+void grub_acpi_halt (void);
+
+#endif /* _ACPIHALT_H_ */
diff --git a/i386/i386at/autoconf.c b/i386/i386at/autoconf.c
index 93c71412..908c3ec0 100644
--- a/i386/i386at/autoconf.c
+++ b/i386/i386at/autoconf.c
@@ -38,12 +38,12 @@
#if NCOM > 0
extern struct bus_driver comdriver;
-extern void comintr();
+#include <i386at/com.h>
#endif /* NCOM */
#if NLPR > 0
extern struct bus_driver lprdriver;
-extern void lprintr();
+#include <i386at/lpr.h>
#endif /* NLPR */
struct bus_ctlr bus_master_init[] = {
@@ -92,9 +92,9 @@ struct bus_device bus_device_init[] = {
*/
void probeio(void)
{
- register struct bus_device *device;
- register struct bus_ctlr *master;
- int i = 0;
+ struct bus_device *device;
+ struct bus_ctlr *master;
+ int i = 0;
for (master = bus_master_init; master->driver; master++)
{
@@ -122,7 +122,7 @@ void probeio(void)
}
void take_dev_irq(
- struct bus_device *dev)
+ const struct bus_device *dev)
{
int pic = (int)dev->sysdep1;
@@ -144,7 +144,7 @@ void take_dev_irq(
}
void take_ctlr_irq(
- struct bus_ctlr *ctlr)
+ const struct bus_ctlr *ctlr)
{
int pic = ctlr->sysdep1;
if (intpri[pic] == 0) {
diff --git a/i386/i386at/biosmem.c b/i386/i386at/biosmem.c
new file mode 100644
index 00000000..a7a440ef
--- /dev/null
+++ b/i386/i386at/biosmem.c
@@ -0,0 +1,910 @@
+/*
+ * Copyright (c) 2010-2014 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <string.h>
+#include <i386/model_dep.h>
+#include <i386at/biosmem.h>
+#include <i386at/elf.h>
+#include <kern/assert.h>
+#include <kern/debug.h>
+#include <kern/macros.h>
+#include <kern/printf.h>
+#include <mach/vm_param.h>
+#include <mach/xen.h>
+#include <mach/machine/multiboot.h>
+#include <sys/types.h>
+#include <vm/vm_page.h>
+
+#define __boot
+#define __bootdata
+#define __init
+
+#define boot_memmove memmove
+#define boot_panic panic
+#define boot_strlen strlen
+
+#define BOOT_CGAMEM phystokv(0xb8000)
+#define BOOT_CGACHARS (80 * 25)
+#define BOOT_CGACOLOR 0x7
+
+extern char _start, _end;
+
+/*
+ * Maximum number of entries in the BIOS memory map.
+ *
+ * Because of adjustments of overlapping ranges, the memory map can grow
+ * to twice this size.
+ */
+#define BIOSMEM_MAX_MAP_SIZE 128
+
+/*
+ * Memory range types.
+ */
+#define BIOSMEM_TYPE_AVAILABLE 1
+#define BIOSMEM_TYPE_RESERVED 2
+#define BIOSMEM_TYPE_ACPI 3
+#define BIOSMEM_TYPE_NVS 4
+#define BIOSMEM_TYPE_UNUSABLE 5
+#define BIOSMEM_TYPE_DISABLED 6
+
+/*
+ * Memory map entry.
+ */
+struct biosmem_map_entry {
+ uint64_t base_addr;
+ uint64_t length;
+ unsigned int type;
+};
+
+/*
+ * Contiguous block of physical memory.
+ *
+ * The "available" range records what has been passed to the VM system as
+ * available inside the segment.
+ */
+struct biosmem_segment {
+ phys_addr_t start;
+ phys_addr_t end;
+ phys_addr_t avail_start;
+ phys_addr_t avail_end;
+};
+
+/*
+ * Memory map built from the information passed by the boot loader.
+ *
+ * If the boot loader didn't pass a valid memory map, a simple map is built
+ * based on the mem_lower and mem_upper multiboot fields.
+ */
+static struct biosmem_map_entry biosmem_map[BIOSMEM_MAX_MAP_SIZE * 2]
+ __bootdata;
+static unsigned int biosmem_map_size __bootdata;
+
+/*
+ * Physical segment boundaries.
+ */
+static struct biosmem_segment biosmem_segments[VM_PAGE_MAX_SEGS] __bootdata;
+
+/*
+ * Boundaries of the simple bootstrap heap.
+ *
+ * This heap is located above BIOS memory.
+ */
+static uint32_t biosmem_heap_start __bootdata;
+static uint32_t biosmem_heap_cur __bootdata;
+static uint32_t biosmem_heap_end __bootdata;
+
+static char biosmem_panic_toobig_msg[] __bootdata
+ = "biosmem: too many memory map entries";
+#ifndef MACH_HYP
+static char biosmem_panic_setup_msg[] __bootdata
+ = "biosmem: unable to set up the early memory allocator";
+#endif /* MACH_HYP */
+static char biosmem_panic_noseg_msg[] __bootdata
+ = "biosmem: unable to find any memory segment";
+static char biosmem_panic_inval_msg[] __bootdata
+ = "biosmem: attempt to allocate 0 page";
+static char biosmem_panic_nomem_msg[] __bootdata
+ = "biosmem: unable to allocate memory";
+
+#ifndef MACH_HYP
+
+static void __boot
+biosmem_map_build(const struct multiboot_raw_info *mbi)
+{
+ struct multiboot_raw_mmap_entry *mb_entry, *mb_end;
+ struct biosmem_map_entry *start, *entry, *end;
+ unsigned long addr;
+
+ addr = phystokv(mbi->mmap_addr);
+ mb_entry = (struct multiboot_raw_mmap_entry *)addr;
+ mb_end = (struct multiboot_raw_mmap_entry *)(addr + mbi->mmap_length);
+ start = biosmem_map;
+ entry = start;
+ end = entry + BIOSMEM_MAX_MAP_SIZE;
+
+ while ((mb_entry < mb_end) && (entry < end)) {
+ entry->base_addr = mb_entry->base_addr;
+ entry->length = mb_entry->length;
+ entry->type = mb_entry->type;
+
+ mb_entry = (void *)mb_entry + sizeof(mb_entry->size) + mb_entry->size;
+ entry++;
+ }
+
+ biosmem_map_size = entry - start;
+}
+
+static void __boot
+biosmem_map_build_simple(const struct multiboot_raw_info *mbi)
+{
+ struct biosmem_map_entry *entry;
+
+ entry = biosmem_map;
+ entry->base_addr = 0;
+ entry->length = mbi->mem_lower << 10;
+ entry->type = BIOSMEM_TYPE_AVAILABLE;
+
+ entry++;
+ entry->base_addr = BIOSMEM_END;
+ entry->length = mbi->mem_upper << 10;
+ entry->type = BIOSMEM_TYPE_AVAILABLE;
+
+ biosmem_map_size = 2;
+}
+
+#endif /* MACH_HYP */
+
+static int __boot
+biosmem_map_entry_is_invalid(const struct biosmem_map_entry *entry)
+{
+ return (entry->base_addr + entry->length) <= entry->base_addr;
+}
+
+static void __boot
+biosmem_map_filter(void)
+{
+ struct biosmem_map_entry *entry;
+ unsigned int i;
+
+ i = 0;
+
+ while (i < biosmem_map_size) {
+ entry = &biosmem_map[i];
+
+ if (biosmem_map_entry_is_invalid(entry)) {
+ biosmem_map_size--;
+ boot_memmove(entry, entry + 1,
+ (biosmem_map_size - i) * sizeof(*entry));
+ continue;
+ }
+
+ i++;
+ }
+}
+
+static void __boot
+biosmem_map_sort(void)
+{
+ struct biosmem_map_entry tmp;
+ unsigned int i, j;
+
+ /*
+ * Simple insertion sort.
+ */
+ for (i = 1; i < biosmem_map_size; i++) {
+ tmp = biosmem_map[i];
+
+ for (j = i - 1; j < i; j--) {
+ if (biosmem_map[j].base_addr < tmp.base_addr)
+ break;
+
+ biosmem_map[j + 1] = biosmem_map[j];
+ }
+
+ biosmem_map[j + 1] = tmp;
+ }
+}
+
+static void __boot
+biosmem_map_adjust(void)
+{
+ struct biosmem_map_entry tmp, *a, *b, *first, *second;
+ uint64_t a_end, b_end, last_end;
+ unsigned int i, j, last_type;
+
+ biosmem_map_filter();
+
+ /*
+ * Resolve overlapping areas, giving priority to most restrictive
+ * (i.e. numerically higher) types.
+ */
+ for (i = 0; i < biosmem_map_size; i++) {
+ a = &biosmem_map[i];
+ a_end = a->base_addr + a->length;
+
+ j = i + 1;
+
+ while (j < biosmem_map_size) {
+ b = &biosmem_map[j];
+ b_end = b->base_addr + b->length;
+
+ if ((a->base_addr >= b_end) || (a_end <= b->base_addr)) {
+ j++;
+ continue;
+ }
+
+ if (a->base_addr < b->base_addr) {
+ first = a;
+ second = b;
+ } else {
+ first = b;
+ second = a;
+ }
+
+ if (a_end > b_end) {
+ last_end = a_end;
+ last_type = a->type;
+ } else {
+ last_end = b_end;
+ last_type = b->type;
+ }
+
+ tmp.base_addr = second->base_addr;
+ tmp.length = MIN(a_end, b_end) - tmp.base_addr;
+ tmp.type = MAX(a->type, b->type);
+ first->length = tmp.base_addr - first->base_addr;
+ second->base_addr += tmp.length;
+ second->length = last_end - second->base_addr;
+ second->type = last_type;
+
+ /*
+ * Filter out invalid entries.
+ */
+ if (biosmem_map_entry_is_invalid(a)
+ && biosmem_map_entry_is_invalid(b)) {
+ *a = tmp;
+ biosmem_map_size--;
+ memmove(b, b + 1, (biosmem_map_size - j) * sizeof(*b));
+ continue;
+ } else if (biosmem_map_entry_is_invalid(a)) {
+ *a = tmp;
+ j++;
+ continue;
+ } else if (biosmem_map_entry_is_invalid(b)) {
+ *b = tmp;
+ j++;
+ continue;
+ }
+
+ if (tmp.type == a->type)
+ first = a;
+ else if (tmp.type == b->type)
+ first = b;
+ else {
+
+ /*
+ * If the overlapping area can't be merged with one of its
+ * neighbors, it must be added as a new entry.
+ */
+
+ if (biosmem_map_size >= ARRAY_SIZE(biosmem_map))
+ boot_panic(biosmem_panic_toobig_msg);
+
+ biosmem_map[biosmem_map_size] = tmp;
+ biosmem_map_size++;
+ j++;
+ continue;
+ }
+
+ if (first->base_addr > tmp.base_addr)
+ first->base_addr = tmp.base_addr;
+
+ first->length += tmp.length;
+ j++;
+ }
+ }
+
+ biosmem_map_sort();
+}
+
+static int __boot
+biosmem_map_find_avail(phys_addr_t *phys_start, phys_addr_t *phys_end)
+{
+ const struct biosmem_map_entry *entry, *map_end;
+ phys_addr_t seg_start, seg_end;
+ uint64_t start, end;
+
+ seg_start = (phys_addr_t)-1;
+ seg_end = (phys_addr_t)-1;
+ map_end = biosmem_map + biosmem_map_size;
+
+ for (entry = biosmem_map; entry < map_end; entry++) {
+ if (entry->type != BIOSMEM_TYPE_AVAILABLE)
+ continue;
+
+ start = vm_page_round(entry->base_addr);
+
+ if (start >= *phys_end)
+ break;
+
+ end = vm_page_trunc(entry->base_addr + entry->length);
+
+ if ((start < end) && (start < *phys_end) && (end > *phys_start)) {
+ if (seg_start == (phys_addr_t)-1)
+ seg_start = start;
+
+ seg_end = end;
+ }
+ }
+
+ if ((seg_start == (phys_addr_t)-1) || (seg_end == (phys_addr_t)-1))
+ return -1;
+
+ if (seg_start > *phys_start)
+ *phys_start = seg_start;
+
+ if (seg_end < *phys_end)
+ *phys_end = seg_end;
+
+ return 0;
+}
+
+static void __boot
+biosmem_set_segment(unsigned int seg_index, phys_addr_t start, phys_addr_t end)
+{
+ biosmem_segments[seg_index].start = start;
+ biosmem_segments[seg_index].end = end;
+}
+
+static phys_addr_t __boot
+biosmem_segment_end(unsigned int seg_index)
+{
+ return biosmem_segments[seg_index].end;
+}
+
+static phys_addr_t __boot
+biosmem_segment_size(unsigned int seg_index)
+{
+ return biosmem_segments[seg_index].end - biosmem_segments[seg_index].start;
+}
+
+#ifndef MACH_HYP
+
+static void __boot
+biosmem_save_cmdline_sizes(struct multiboot_raw_info *mbi)
+{
+ struct multiboot_raw_module *mod;
+ uint32_t i, va;
+
+ if (mbi->flags & MULTIBOOT_LOADER_CMDLINE) {
+ va = phystokv(mbi->cmdline);
+ mbi->unused0 = boot_strlen((char *)va) + 1;
+ }
+
+ if (mbi->flags & MULTIBOOT_LOADER_MODULES) {
+ unsigned long addr;
+
+ addr = phystokv(mbi->mods_addr);
+
+ for (i = 0; i < mbi->mods_count; i++) {
+ mod = (struct multiboot_raw_module *)addr + i;
+ va = phystokv(mod->string);
+ mod->reserved = boot_strlen((char *)va) + 1;
+ }
+ }
+}
+
+static void __boot
+biosmem_find_boot_data_update(uint32_t min, uint32_t *start, uint32_t *end,
+ uint32_t data_start, uint32_t data_end)
+{
+ if ((min <= data_start) && (data_start < *start)) {
+ *start = data_start;
+ *end = data_end;
+ }
+}
+
+/*
+ * Find the first boot data in the given range, and return their containing
+ * area (start address is returned directly, end address is returned in end).
+ * The following are considered boot data:
+ * - the kernel
+ * - the kernel command line
+ * - the module table
+ * - the modules
+ * - the modules command lines
+ * - the ELF section header table
+ * - the ELF .shstrtab, .symtab and .strtab sections
+ *
+ * If no boot data was found, 0 is returned, and the end address isn't set.
+ */
+static uint32_t __boot
+biosmem_find_boot_data(const struct multiboot_raw_info *mbi, uint32_t min,
+ uint32_t max, uint32_t *endp)
+{
+ struct multiboot_raw_module *mod;
+ struct elf_shdr *shdr;
+ uint32_t i, start, end = end;
+ unsigned long tmp;
+
+ start = max;
+
+ biosmem_find_boot_data_update(min, &start, &end, _kvtophys(&_start),
+ _kvtophys(&_end));
+
+ if ((mbi->flags & MULTIBOOT_LOADER_CMDLINE) && (mbi->cmdline != 0))
+ biosmem_find_boot_data_update(min, &start, &end, mbi->cmdline,
+ mbi->cmdline + mbi->unused0);
+
+ if (mbi->flags & MULTIBOOT_LOADER_MODULES) {
+ i = mbi->mods_count * sizeof(struct multiboot_raw_module);
+ biosmem_find_boot_data_update(min, &start, &end, mbi->mods_addr,
+ mbi->mods_addr + i);
+ tmp = phystokv(mbi->mods_addr);
+
+ for (i = 0; i < mbi->mods_count; i++) {
+ mod = (struct multiboot_raw_module *)tmp + i;
+ biosmem_find_boot_data_update(min, &start, &end, mod->mod_start,
+ mod->mod_end);
+
+ if (mod->string != 0)
+ biosmem_find_boot_data_update(min, &start, &end, mod->string,
+ mod->string + mod->reserved);
+ }
+ }
+
+ if (mbi->flags & MULTIBOOT_LOADER_SHDR) {
+ tmp = mbi->shdr_num * mbi->shdr_size;
+ biosmem_find_boot_data_update(min, &start, &end, mbi->shdr_addr,
+ mbi->shdr_addr + tmp);
+ tmp = phystokv(mbi->shdr_addr);
+
+ for (i = 0; i < mbi->shdr_num; i++) {
+ shdr = (struct elf_shdr *)(tmp + (i * mbi->shdr_size));
+
+ if ((shdr->type != ELF_SHT_SYMTAB)
+ && (shdr->type != ELF_SHT_STRTAB))
+ continue;
+
+ biosmem_find_boot_data_update(min, &start, &end, shdr->addr,
+ shdr->addr + shdr->size);
+ }
+ }
+
+ if (start == max)
+ return 0;
+
+ *endp = end;
+ return start;
+}
+
+static void __boot
+biosmem_setup_allocator(struct multiboot_raw_info *mbi)
+{
+ uint32_t heap_start, heap_end, max_heap_start, max_heap_end;
+ uint32_t mem_end, next;
+
+ /*
+ * Find some memory for the heap. Look for the largest unused area in
+ * upper memory, carefully avoiding all boot data.
+ */
+ mem_end = vm_page_trunc((mbi->mem_upper + 1024) << 10);
+
+#ifndef __LP64__
+ if (mem_end > VM_PAGE_DIRECTMAP_LIMIT)
+ mem_end = VM_PAGE_DIRECTMAP_LIMIT;
+#endif /* __LP64__ */
+
+ max_heap_start = 0;
+ max_heap_end = 0;
+ next = BIOSMEM_END;
+
+ do {
+ heap_start = next;
+ heap_end = biosmem_find_boot_data(mbi, heap_start, mem_end, &next);
+
+ if (heap_end == 0) {
+ heap_end = mem_end;
+ next = 0;
+ }
+
+ if ((heap_end - heap_start) > (max_heap_end - max_heap_start)) {
+ max_heap_start = heap_start;
+ max_heap_end = heap_end;
+ }
+ } while (next != 0);
+
+ max_heap_start = vm_page_round(max_heap_start);
+ max_heap_end = vm_page_trunc(max_heap_end);
+
+ if (max_heap_start >= max_heap_end)
+ boot_panic(biosmem_panic_setup_msg);
+
+ biosmem_heap_start = max_heap_start;
+ biosmem_heap_end = max_heap_end;
+ biosmem_heap_cur = biosmem_heap_end;
+}
+
+#endif /* MACH_HYP */
+
+static void __boot
+biosmem_bootstrap_common(void)
+{
+ phys_addr_t phys_start, phys_end, last_addr;
+ int error;
+
+ biosmem_map_adjust();
+
+ phys_start = BIOSMEM_BASE;
+ phys_end = VM_PAGE_DMA_LIMIT;
+ error = biosmem_map_find_avail(&phys_start, &phys_end);
+
+ if (error)
+ boot_panic(biosmem_panic_noseg_msg);
+
+ biosmem_set_segment(VM_PAGE_SEG_DMA, phys_start, phys_end);
+ last_addr = phys_end;
+
+ phys_start = VM_PAGE_DMA_LIMIT;
+#ifdef VM_PAGE_DMA32_LIMIT
+ phys_end = VM_PAGE_DMA32_LIMIT;
+ error = biosmem_map_find_avail(&phys_start, &phys_end);
+
+ if (error)
+ goto out;
+
+ biosmem_set_segment(VM_PAGE_SEG_DMA32, phys_start, phys_end);
+ last_addr = phys_end;
+
+ phys_start = VM_PAGE_DMA32_LIMIT;
+#endif /* VM_PAGE_DMA32_LIMIT */
+ phys_end = VM_PAGE_DIRECTMAP_LIMIT;
+ error = biosmem_map_find_avail(&phys_start, &phys_end);
+
+ if (error)
+ goto out;
+
+ biosmem_set_segment(VM_PAGE_SEG_DIRECTMAP, phys_start, phys_end);
+ last_addr = phys_end;
+
+ phys_start = VM_PAGE_DIRECTMAP_LIMIT;
+ phys_end = VM_PAGE_HIGHMEM_LIMIT;
+ error = biosmem_map_find_avail(&phys_start, &phys_end);
+
+ if (error)
+ goto out;
+
+ biosmem_set_segment(VM_PAGE_SEG_HIGHMEM, phys_start, phys_end);
+
+out:
+ /* XXX phys_last_addr must be part of the direct physical mapping */
+ phys_last_addr = last_addr;
+}
+
+#ifdef MACH_HYP
+
+void
+biosmem_xen_bootstrap(void)
+{
+ struct biosmem_map_entry *entry;
+
+ entry = biosmem_map;
+ entry->base_addr = 0;
+ entry->length = boot_info.nr_pages << PAGE_SHIFT;
+ entry->type = BIOSMEM_TYPE_AVAILABLE;
+
+ biosmem_map_size = 1;
+
+ biosmem_bootstrap_common();
+
+ biosmem_heap_start = _kvtophys(boot_info.pt_base)
+ + (boot_info.nr_pt_frames + 3) * 0x1000;
+ biosmem_heap_end = boot_info.nr_pages << PAGE_SHIFT;
+
+#ifndef __LP64__
+ /* TODO Check that this actually makes sense */
+ if (biosmem_heap_end > VM_PAGE_DIRECTMAP_LIMIT)
+ biosmem_heap_end = VM_PAGE_DIRECTMAP_LIMIT;
+#endif /* __LP64__ */
+
+ /*
+ * XXX Allocation on Xen must be bottom-up:
+ * At the "start of day", only 512k are available after the boot
+ * data. The pmap module then creates a 4g mapping so all physical
+ * memory is available, but it uses this allocator to do so.
+ * Therefore, it must return pages from this small 512k region
+ * first.
+ */
+ biosmem_heap_cur = biosmem_heap_start;
+}
+
+#else /* MACH_HYP */
+
+void __boot
+biosmem_bootstrap(struct multiboot_raw_info *mbi)
+{
+ if (mbi->flags & MULTIBOOT_LOADER_MMAP)
+ biosmem_map_build(mbi);
+ else
+ biosmem_map_build_simple(mbi);
+
+ biosmem_bootstrap_common();
+
+ /*
+ * The kernel and modules command lines will be memory mapped later
+ * during initialization. Their respective sizes must be saved.
+ */
+ biosmem_save_cmdline_sizes(mbi);
+ biosmem_setup_allocator(mbi);
+}
+
+#endif /* MACH_HYP */
+
+unsigned long __boot
+biosmem_bootalloc(unsigned int nr_pages)
+{
+ unsigned long addr, size;
+
+ assert(!vm_page_ready());
+
+ size = vm_page_ptoa(nr_pages);
+
+ if (size == 0)
+ boot_panic(biosmem_panic_inval_msg);
+
+#ifdef MACH_HYP
+ addr = biosmem_heap_cur;
+#else /* MACH_HYP */
+ /* Top-down allocation to avoid unnecessarily filling DMA segments */
+ addr = biosmem_heap_cur - size;
+#endif /* MACH_HYP */
+
+ if ((addr < biosmem_heap_start) || (addr > biosmem_heap_cur))
+ boot_panic(biosmem_panic_nomem_msg);
+
+#ifdef MACH_HYP
+ biosmem_heap_cur += size;
+#else /* MACH_HYP */
+ biosmem_heap_cur = addr;
+#endif /* MACH_HYP */
+
+ return addr;
+}
+
+phys_addr_t __boot
+biosmem_directmap_size(void)
+{
+ if (biosmem_segment_size(VM_PAGE_SEG_DIRECTMAP) != 0)
+ return biosmem_segment_end(VM_PAGE_SEG_DIRECTMAP);
+ else if (biosmem_segment_size(VM_PAGE_SEG_DMA32) != 0)
+ return biosmem_segment_end(VM_PAGE_SEG_DMA32);
+ else
+ return biosmem_segment_end(VM_PAGE_SEG_DMA);
+}
+
+static const char * __init
+biosmem_type_desc(unsigned int type)
+{
+ switch (type) {
+ case BIOSMEM_TYPE_AVAILABLE:
+ return "available";
+ case BIOSMEM_TYPE_RESERVED:
+ return "reserved";
+ case BIOSMEM_TYPE_ACPI:
+ return "ACPI";
+ case BIOSMEM_TYPE_NVS:
+ return "ACPI NVS";
+ case BIOSMEM_TYPE_UNUSABLE:
+ return "unusable";
+ default:
+ return "unknown (reserved)";
+ }
+}
+
+static void __init
+biosmem_map_show(void)
+{
+ const struct biosmem_map_entry *entry, *end;
+
+ printf("biosmem: physical memory map:\n");
+
+ for (entry = biosmem_map, end = entry + biosmem_map_size;
+ entry < end;
+ entry++)
+ printf("biosmem: %018llx:%018llx, %s\n", entry->base_addr,
+ entry->base_addr + entry->length,
+ biosmem_type_desc(entry->type));
+
+ printf("biosmem: heap: %x-%x\n", biosmem_heap_start, biosmem_heap_end);
+}
+
+static void __init
+biosmem_load_segment(struct biosmem_segment *seg, uint64_t max_phys_end,
+ phys_addr_t phys_start, phys_addr_t phys_end,
+ phys_addr_t avail_start, phys_addr_t avail_end)
+{
+ unsigned int seg_index;
+
+ seg_index = seg - biosmem_segments;
+
+ if (phys_end > max_phys_end) {
+ if (max_phys_end <= phys_start) {
+ printf("biosmem: warning: segment %s physically unreachable, "
+ "not loaded\n", vm_page_seg_name(seg_index));
+ return;
+ }
+
+ printf("biosmem: warning: segment %s truncated to %#llx\n",
+ vm_page_seg_name(seg_index), max_phys_end);
+ phys_end = max_phys_end;
+ }
+
+ if ((avail_start < phys_start) || (avail_start >= phys_end))
+ avail_start = phys_start;
+
+ if ((avail_end <= phys_start) || (avail_end > phys_end))
+ avail_end = phys_end;
+
+ seg->avail_start = avail_start;
+ seg->avail_end = avail_end;
+ vm_page_load(seg_index, phys_start, phys_end, avail_start, avail_end);
+}
+
+void __init
+biosmem_setup(void)
+{
+ struct biosmem_segment *seg;
+ unsigned int i;
+
+ biosmem_map_show();
+
+ for (i = 0; i < ARRAY_SIZE(biosmem_segments); i++) {
+ if (biosmem_segment_size(i) == 0)
+ break;
+
+ seg = &biosmem_segments[i];
+ biosmem_load_segment(seg, VM_PAGE_HIGHMEM_LIMIT, seg->start, seg->end,
+ biosmem_heap_start, biosmem_heap_cur);
+ }
+}
+
+static void __init
+biosmem_free_usable_range(phys_addr_t start, phys_addr_t end)
+{
+ struct vm_page *page;
+
+ printf("biosmem: release to vm_page: %llx-%llx (%lluk)\n",
+ (unsigned long long)start, (unsigned long long)end,
+ (unsigned long long)((end - start) >> 10));
+
+ while (start < end) {
+ page = vm_page_lookup_pa(start);
+ assert(page != NULL);
+ vm_page_manage(page);
+ start += PAGE_SIZE;
+ }
+}
+
+static void __init
+biosmem_free_usable_update_start(phys_addr_t *start, phys_addr_t res_start,
+ phys_addr_t res_end)
+{
+ if ((*start >= res_start) && (*start < res_end))
+ *start = res_end;
+}
+
+static phys_addr_t __init
+biosmem_free_usable_start(phys_addr_t start)
+{
+ const struct biosmem_segment *seg;
+ unsigned int i;
+
+ biosmem_free_usable_update_start(&start, _kvtophys(&_start),
+ _kvtophys(&_end));
+ biosmem_free_usable_update_start(&start, biosmem_heap_start,
+ biosmem_heap_end);
+
+ for (i = 0; i < ARRAY_SIZE(biosmem_segments); i++) {
+ seg = &biosmem_segments[i];
+ biosmem_free_usable_update_start(&start, seg->avail_start,
+ seg->avail_end);
+ }
+
+ return start;
+}
+
+static int __init
+biosmem_free_usable_reserved(phys_addr_t addr)
+{
+ const struct biosmem_segment *seg;
+ unsigned int i;
+
+ if ((addr >= _kvtophys(&_start))
+ && (addr < _kvtophys(&_end)))
+ return 1;
+
+ if ((addr >= biosmem_heap_start) && (addr < biosmem_heap_end))
+ return 1;
+
+ for (i = 0; i < ARRAY_SIZE(biosmem_segments); i++) {
+ seg = &biosmem_segments[i];
+
+ if ((addr >= seg->avail_start) && (addr < seg->avail_end))
+ return 1;
+ }
+
+ return 0;
+}
+
+static phys_addr_t __init
+biosmem_free_usable_end(phys_addr_t start, phys_addr_t entry_end)
+{
+ while (start < entry_end) {
+ if (biosmem_free_usable_reserved(start))
+ break;
+
+ start += PAGE_SIZE;
+ }
+
+ return start;
+}
+
+static void __init
+biosmem_free_usable_entry(phys_addr_t start, phys_addr_t end)
+{
+ phys_addr_t entry_end;
+
+ entry_end = end;
+
+ for (;;) {
+ start = biosmem_free_usable_start(start);
+
+ if (start >= entry_end)
+ return;
+
+ end = biosmem_free_usable_end(start, entry_end);
+ biosmem_free_usable_range(start, end);
+ start = end;
+ }
+}
+
+void __init
+biosmem_free_usable(void)
+{
+ struct biosmem_map_entry *entry;
+ uint64_t start, end;
+ unsigned int i;
+
+ for (i = 0; i < biosmem_map_size; i++) {
+ entry = &biosmem_map[i];
+
+ if (entry->type != BIOSMEM_TYPE_AVAILABLE)
+ continue;
+
+ start = vm_page_round(entry->base_addr);
+
+ if (start >= VM_PAGE_HIGHMEM_LIMIT)
+ break;
+
+ end = vm_page_trunc(entry->base_addr + entry->length);
+
+ if (start < BIOSMEM_BASE)
+ start = BIOSMEM_BASE;
+
+ biosmem_free_usable_entry(start, end);
+ }
+}
diff --git a/i386/i386at/biosmem.h b/i386/i386at/biosmem.h
new file mode 100644
index 00000000..1db63f9f
--- /dev/null
+++ b/i386/i386at/biosmem.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2010-2014 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _X86_BIOSMEM_H
+#define _X86_BIOSMEM_H
+
+#include <mach/machine/vm_types.h>
+#include <mach/machine/multiboot.h>
+
+/*
+ * Address where the address of the Extended BIOS Data Area segment can be
+ * found.
+ */
+#define BIOSMEM_EBDA_PTR 0x40e
+
+/*
+ * Significant low memory addresses.
+ *
+ * The first 64 KiB are reserved for various reasons (e.g. to preserve BIOS
+ * data and to work around data corruption on some hardware).
+ */
+#define BIOSMEM_BASE 0x010000
+#define BIOSMEM_BASE_END 0x0a0000
+#define BIOSMEM_EXT_ROM 0x0e0000
+#define BIOSMEM_ROM 0x0f0000
+#define BIOSMEM_END 0x100000
+
+/*
+ * Early initialization of the biosmem module.
+ *
+ * This function processes the given multiboot data for BIOS-provided
+ * memory information, and sets up a bootstrap physical page allocator.
+ *
+ * It is called before paging is enabled.
+ */
+#ifdef MACH_HYP
+void biosmem_xen_bootstrap(void);
+#else /* MACH_HYP */
+void biosmem_bootstrap(struct multiboot_raw_info *mbi);
+#endif /* MACH_HYP */
+
+/*
+ * Allocate contiguous physical pages during bootstrap.
+ *
+ * This function is called before paging is enabled. It should only be used
+ * to allocate initial page table pages. Those pages are later loaded into
+ * the VM system (as reserved pages) which means they can be freed like other
+ * regular pages. Users should fix up the type of those pages once the VM
+ * system is initialized.
+ */
+unsigned long biosmem_bootalloc(unsigned int nr_pages);
+
+/*
+ * Return the amount of physical memory that can be directly mapped.
+ *
+ * This includes the size of both the DMA/DMA32 and DIRECTMAP segments.
+ */
+phys_addr_t biosmem_directmap_size(void);
+
+/*
+ * Set up physical memory based on the information obtained during bootstrap
+ * and load it in the VM system.
+ */
+void biosmem_setup(void);
+
+/*
+ * Free all usable memory.
+ *
+ * This includes ranges that weren't part of the bootstrap allocator initial
+ * heap, e.g. because they contained boot data.
+ */
+void biosmem_free_usable(void);
+
+#endif /* _X86_BIOSMEM_H */
diff --git a/i386/i386at/com.c b/i386/i386at/com.c
index 93c3faaa..84891bd2 100644
--- a/i386/i386at/com.c
+++ b/i386/i386at/com.c
@@ -49,11 +49,7 @@
#include <device/cons.h>
-int comprobe(), commctl();
-void comstart(struct tty *);
-void comstop(), comattach(), comintr();
static void comparam();
-int comgetstat(), comsetstat();
static vm_offset_t com_std[NCOM] = { 0 };
struct bus_device *cominfo[NCOM];
@@ -149,7 +145,7 @@ comprobe_general(struct bus_device *dev, int noisy)
type = "82450 or 16450";
outb(FIFO_CTL(addr), iFIFOENA | iFIFO14CH); /* Enable fifo */
if ((inb(FIFO_CTL(addr)) & iFIFO14CH) != 0)
- { /* Was it successfull */
+ { /* Was it successful */
/* if both bits are not set then broken xx550 */
if ((inb(FIFO_CTL(addr)) & iFIFO14CH) == iFIFO14CH)
{
@@ -173,9 +169,9 @@ comprobe_general(struct bus_device *dev, int noisy)
* all of bus_device_init
*/
int
-comprobe(int port, struct bus_device *dev)
+comprobe(vm_offset_t port, struct bus_ctlr *dev)
{
- return comprobe_general(dev, /*noisy*/ 0);
+ return comprobe_general((struct bus_device *)dev, /*noisy*/ 0);
}
/*
@@ -265,7 +261,7 @@ comcninit(struct consdev *cp)
outb(LINE_CTL(addr), iDLAB);
outb(BAUD_LSB(addr), divisorreg[RCBAUD] & 0xff);
outb(BAUD_MSB(addr), divisorreg[RCBAUD] >>8);
- outb(LINE_CTL(addr), i8BITS);
+ outb(LINE_CTL(addr), i8BITS);
outb(INTR_ENAB(addr), 0);
outb(MODEM_CTL(addr), iDTR|iRTS|iOUT2);
@@ -323,7 +319,7 @@ boolean_t com_reprobe(
}
io_return_t comopen(
- int dev,
+ dev_t dev,
int flag,
io_req_t ior)
{
@@ -342,6 +338,8 @@ io_return_t comopen(
*/
if (!com_reprobe(unit))
return D_NO_SUCH_DEVICE;
+ if ((isai = cominfo[unit]) == 0 || isai->alive == 0)
+ return D_NO_SUCH_DEVICE;
}
tp = &com_tty[unit];
@@ -402,8 +400,8 @@ io_return_t comopen(
return result;
}
-io_return_t comclose(dev, flag)
-int dev;
+void comclose(dev, flag)
+dev_t dev;
int flag;
{
struct tty *tp = &com_tty[minor(dev)];
@@ -418,18 +416,18 @@ int flag;
if (comfifo[minor(dev)] != 0)
outb(INTR_ID(addr), 0x00); /* Disable fifos */
}
- return 0;
+ return;
}
io_return_t comread(dev, ior)
-int dev;
+dev_t dev;
io_req_t ior;
{
return char_read(&com_tty[minor(dev)], ior);
}
io_return_t comwrite(dev, ior)
-int dev;
+dev_t dev;
io_req_t ior;
{
return char_write(&com_tty[minor(dev)], ior);
@@ -466,11 +464,11 @@ natural_t *count; /* out */
}
io_return_t
-comsetstat(dev, flavor, data, count)
-dev_t dev;
-int flavor;
-int * data;
-natural_t count;
+comsetstat(
+ dev_t dev,
+ int flavor,
+ int * data,
+ natural_t count)
{
io_return_t result = D_SUCCESS;
int unit = minor(dev);
@@ -496,10 +494,9 @@ natural_t count;
}
void
-comintr(unit)
-int unit;
+comintr(int unit)
{
- register struct tty *tp = &com_tty[unit];
+ struct tty *tp = &com_tty[unit];
u_short addr = cominfo[unit]->address;
static char comoverrun = 0;
char c, line, intr_id;
@@ -521,10 +518,35 @@ int unit;
case RECi:
case CTIi: /* Character timeout indication */
if (tp->t_state&TS_ISOPEN) {
+ int escape = 0;
while ((line = inb(LINE_STAT(addr))) & iDR) {
c = inb(TXRX(addr));
- ttyinput(c, tp);
+
+ if (c == 0x1b) {
+ escape = 1;
+ continue;
+ }
+
+#if MACH_KDB
+ if (escape && c == 'D'-('A'-1))
+ /* ctrl-alt-d pressed,
+ invoke debugger */
+ kdb_kintr();
+ else
+#endif /* MACH_KDB */
+ if (escape) {
+ ttyinput(0x1b, tp);
+ ttyinput(c, tp);
+ }
+ else
+ ttyinput(c, tp);
+
+ escape = 0;
}
+
+ if (escape)
+ /* just escape */
+ ttyinput(0x1b, tp);
} else
tt_open_wakeup(tp);
break;
@@ -547,8 +569,7 @@ int unit;
}
static void
-comparam(unit)
-register int unit;
+comparam(int unit)
{
struct tty *tp = &com_tty[unit];
u_short addr = (int)tp->t_addr;
@@ -617,10 +638,9 @@ comparm(int unit, int baud, int intr, int mode, int modem)
int comst_1, comst_2, comst_3, comst_4, comst_5 = 14;
void
-comstart(tp)
-struct tty *tp;
+comstart(struct tty *tp)
{
- char nch;
+ int nch;
#if 0
int i;
#endif
@@ -656,6 +676,8 @@ comst_4++;
}
#else
nch = getc(&tp->t_outq);
+ if (nch == -1)
+ return;
if ((nch & 0200) && ((tp->t_flags & LITOUT) == 0)) {
timeout((timer_func_t *)ttrstrt, (char *)tp, (nch & 0x7f) + 6);
tp->t_state |= TS_TIMEOUT;
@@ -698,8 +720,9 @@ printf("Tty %p was stuck\n", tp);
* Set receive modem state from modem status register.
*/
void
-fix_modem_state(unit, modem_stat)
-int unit, modem_stat;
+fix_modem_state(
+ int unit,
+ int modem_stat)
{
int stat = 0;
@@ -751,14 +774,14 @@ commodem_intr(
*/
int
commctl(
- register struct tty *tp,
- int bits,
- int how)
+ struct tty *tp,
+ int bits,
+ int how)
{
spl_t s;
int unit;
vm_offset_t dev_addr;
- register int b = 0; /* Suppress gcc warning */
+ int b = 0; /* Suppress gcc warning */
unit = minor(tp->t_dev);
@@ -817,9 +840,9 @@ commctl(
}
void
-comstop(tp, flags)
-register struct tty *tp;
-int flags;
+comstop(
+ struct tty *tp,
+ int flags)
{
if ((tp->t_state & TS_BUSY) && (tp->t_state & TS_TTSTOP) == 0)
tp->t_state |= TS_FLUSH;
@@ -830,16 +853,16 @@ int flags;
* Code to be called from debugger.
*
*/
-void compr_addr(addr)
+void compr_addr(vm_offset_t addr)
{
/* The two line_stat prints may show different values, since
* touching some of the registers constitutes changing them.
*/
- printf("LINE_STAT(%x) %x\n",
+ printf("LINE_STAT(%lu) %x\n",
LINE_STAT(addr), inb(LINE_STAT(addr)));
- printf("TXRX(%x) %x, INTR_ENAB(%x) %x, INTR_ID(%x) %x, LINE_CTL(%x) %x,\n\
-MODEM_CTL(%x) %x, LINE_STAT(%x) %x, MODEM_STAT(%x) %x\n",
+ printf("TXRX(%lu) %x, INTR_ENAB(%lu) %x, INTR_ID(%lu) %x, LINE_CTL(%lu) %x,\n\
+MODEM_CTL(%lu) %x, LINE_STAT(%lu) %x, MODEM_STAT(%lu) %x\n",
TXRX(addr), inb(TXRX(addr)),
INTR_ENAB(addr), inb(INTR_ENAB(addr)),
INTR_ID(addr), inb(INTR_ID(addr)),
@@ -849,7 +872,7 @@ MODEM_CTL(%x) %x, LINE_STAT(%x) %x, MODEM_STAT(%x) %x\n",
MODEM_STAT(addr),inb(MODEM_STAT(addr)));
}
-int compr(unit)
+int compr(int unit)
{
compr_addr(cominfo[unit]->address);
return(0);
diff --git a/i386/i386at/com.h b/i386/i386at/com.h
index 49f23eec..779cdba8 100644
--- a/i386/i386at/com.h
+++ b/i386/i386at/com.h
@@ -28,6 +28,8 @@
#include <mach/std_types.h>
#include <device/cons.h>
+#include <device/tty.h>
+#include <chips/busses.h>
/*
* Set receive modem state from modem status register.
@@ -47,5 +49,36 @@ extern int comcnprobe(struct consdev *cp);
extern int comcninit(struct consdev *cp);
extern int comcngetc(dev_t dev, int wait);
extern int comcnputc(dev_t dev, int c);
+extern void comintr(int unit);
+
+int comprobe(vm_offset_t port, struct bus_ctlr *dev);
+int commctl(struct tty *tp, int bits, int how);
+void comstart(struct tty *tp);
+void comstop(struct tty *tp, int flags);
+void comattach(struct bus_device *dev);
+
+extern io_return_t
+comgetstat(
+ dev_t dev,
+ int flavor,
+ int *data,
+ natural_t *count);
+
+extern io_return_t
+comsetstat(
+ dev_t dev,
+ int flavor,
+ int *data,
+ natural_t count);
+
+#if MACH_KDB
+extern void kdb_kintr(void);
+#endif /* MACH_KDB */
+
+extern io_return_t comopen(dev_t dev, int flag, io_req_t ior);
+extern void comclose(dev_t dev, int flag);
+extern io_return_t comread(dev_t dev, io_req_t ior);
+extern io_return_t comwrite(dev_t dev, io_req_t ior);
+extern io_return_t comportdeath(dev_t dev, mach_port_t port);
#endif /* _COM_H_ */
diff --git a/i386/i386at/conf.c b/i386/i386at/conf.c
index 83c8dbfc..fe7c7c09 100644
--- a/i386/i386at/conf.c
+++ b/i386/i386at/conf.c
@@ -29,48 +29,42 @@
#include <mach/machine/vm_types.h>
#include <device/conf.h>
+#include <kern/mach_clock.h>
+#include <i386at/model_dep.h>
-extern int timeopen(), timeclose();
-extern vm_offset_t timemmap();
#define timename "time"
#ifndef MACH_HYP
-extern int kdopen(), kdclose(), kdread(), kdwrite();
-extern int kdgetstat(), kdsetstat(), kdportdeath();
-extern vm_offset_t kdmmap();
+#include <i386at/kd.h>
#define kdname "kd"
#if NCOM > 0
-extern int comopen(), comclose(), comread(), comwrite();
-extern int comgetstat(), comsetstat(), comportdeath();
+#include <i386at/com.h>
#define comname "com"
#endif /* NCOM > 0 */
#if NLPR > 0
-extern int lpropen(), lprclose(), lprread(), lprwrite();
-extern int lprgetstat(), lprsetstat(), lprportdeath();
+#include <i386at/lpr.h>
#define lprname "lpr"
#endif /* NLPR > 0 */
#endif /* MACH_HYP */
-extern int kbdopen(), kbdclose(), kbdread();
-extern int kbdgetstat(), kbdsetstat();
+#include <i386at/kd_event.h>
#define kbdname "kbd"
#ifndef MACH_HYP
-extern int mouseopen(), mouseclose(), mouseread(), mousegetstat();
+#include <i386at/kd_mouse.h>
#define mousename "mouse"
-extern vm_offset_t memmmap();
+#include <i386at/mem.h>
#define memname "mem"
#endif /* MACH_HYP */
-extern int kmsgopen(), kmsgclose(), kmsgread(), kmsggetstat();
+#include <device/kmsg.h>
#define kmsgname "kmsg"
#ifdef MACH_HYP
-extern int hypcnopen(), hypcnclose(), hypcnread(), hypcnwrite();
-extern int hypcngetstat(), hypcnsetstat(), hypcnportdeath();
+#include <xen/console.h>
#define hypcnname "hyp"
#endif /* MACH_HYP */
@@ -87,21 +81,27 @@ struct dev_ops dev_name_list[] =
/* We don't assign a console here, when we find one via
cninit() we stick something appropriate here through the
indirect list */
- { "cn", nulldev, nulldev, nulldev,
- nulldev, nulldev, nulldev, nomap,
- nodev, nulldev, nulldev, 0,
+ { "cn", nulldev_open, nulldev_close, nulldev_read,
+ nulldev_write, nulldev_getstat, nulldev_setstat, nomap,
+ nodev, nulldev, nulldev_portdeath, 0,
nodev },
#ifndef MACH_HYP
+#if ENABLE_IMMEDIATE_CONSOLE
+ { "immc", nulldev_open, nulldev_close, nulldev_read,
+ nulldev_write, nulldev_getstat, nulldev_setstat,
+ nomap, nodev, nulldev, nulldev_portdeath, 0,
+ nodev },
+#endif /* ENABLE_IMMEDIATE_CONSOLE */
{ kdname, kdopen, kdclose, kdread,
kdwrite, kdgetstat, kdsetstat, kdmmap,
nodev, nulldev, kdportdeath, 0,
nodev },
#endif /* MACH_HYP */
- { timename, timeopen, timeclose, nulldev,
- nulldev, nulldev, nulldev, timemmap,
- nodev, nulldev, nulldev, 0,
+ { timename, timeopen, timeclose, nulldev_read,
+ nulldev_write, nulldev_getstat, nulldev_setstat, timemmap,
+ nodev, nulldev, nulldev_portdeath, 0,
nodev },
#ifndef MACH_HYP
@@ -120,25 +120,25 @@ struct dev_ops dev_name_list[] =
#endif
{ mousename, mouseopen, mouseclose, mouseread,
- nodev, mousegetstat, nulldev, nomap,
- nodev, nulldev, nulldev, 0,
+ nulldev_write, mousegetstat, nulldev_setstat, nomap,
+ nodev, nulldev, nulldev_portdeath, 0,
nodev },
{ kbdname, kbdopen, kbdclose, kbdread,
- nodev, kbdgetstat, kbdsetstat, nomap,
- nodev, nulldev, nulldev, 0,
+ nulldev_write, kbdgetstat, kbdsetstat, nomap,
+ nodev, nulldev, nulldev_portdeath, 0,
nodev },
- { memname, nulldev, nulldev, nodev,
- nodev, nodev, nodev, memmmap,
- nodev, nulldev, nulldev, 0,
+ { memname, nulldev_open, nulldev_close, nulldev_read,
+ nulldev_write, nulldev_getstat, nulldev_setstat, memmmap,
+ nodev, nulldev, nulldev_portdeath, 0,
nodev },
#endif /* MACH_HYP */
#ifdef MACH_KMSG
{ kmsgname, kmsgopen, kmsgclose, kmsgread,
- nodev, kmsggetstat, nodev, nomap,
- nodev, nulldev, nulldev, 0,
+ nulldev_write, kmsggetstat, nulldev_setstat, nomap,
+ nodev, nulldev, nulldev_portdeath, 0,
nodev },
#endif
diff --git a/i386/i386at/cons_conf.c b/i386/i386at/cons_conf.c
index cf42bb63..1d7dd387 100644
--- a/i386/i386at/cons_conf.c
+++ b/i386/i386at/cons_conf.c
@@ -39,6 +39,10 @@
#endif
#endif /* MACH_HYP */
+#if ENABLE_IMMEDIATE_CONSOLE
+#include "immc.h"
+#endif /* ENABLE_IMMEDIATE_CONSOLE */
+
/*
* The rest of the consdev fields are filled in by the respective
* cnprobe routine.
@@ -47,6 +51,9 @@ struct consdev constab[] = {
#ifdef MACH_HYP
{"hyp", hypcnprobe, hypcninit, hypcngetc, hypcnputc},
#else /* MACH_HYP */
+#if ENABLE_IMMEDIATE_CONSOLE
+ {"immc", immc_cnprobe, immc_cninit, immc_cngetc, immc_cnputc},
+#endif /* ENABLE_IMMEDIATE_CONSOLE */
{"kd", kdcnprobe, kdcninit, kdcngetc, kdcnputc},
#if NCOM > 0
{"com", comcnprobe, comcninit, comcngetc, comcnputc},
diff --git a/i386/i386at/cram.h b/i386/i386at/cram.h
index 8373ce03..40f3f0a5 100644
--- a/i386/i386at/cram.h
+++ b/i386/i386at/cram.h
@@ -50,6 +50,9 @@ NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#ifndef _CRAM_H_
+#define _CRAM_H_
+
/*
* outb(CMOS_ADDR, addr);
* result = inb(CMOS_DATA);
@@ -73,3 +76,4 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#define CM_CGA_80 0x20
#define CM_MONO_80 0x30
+#endif /* _CRAM_H_ */
diff --git a/i386/i386at/disk.h b/i386/i386at/disk.h
index e1fe6b98..c5583752 100644
--- a/i386/i386at/disk.h
+++ b/i386/i386at/disk.h
@@ -49,53 +49,16 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
* disk.h
*/
-/* Grab the public part. */
-#include <mach/machine/disk.h>
-
-
-
-#define MAX_ALTENTS 253 /* Maximum # of slots for alts */
- /* allowed for in the table. */
-
-#define ALT_SANITY 0xdeadbeef /* magic # to validate alt table */
-
-struct alt_table {
- u_short alt_used; /* # of alternates already assigned */
- u_short alt_reserved; /* # of alternates reserved on disk */
- long alt_base; /* 1st sector (abs) of the alt area */
- long alt_bad[MAX_ALTENTS]; /* list of bad sectors/tracks */
-};
-
-struct alt_info { /* table length should be multiple of 512 */
- long alt_sanity; /* to validate correctness */
- u_short alt_version; /* to corroborate vintage */
- u_short alt_pad; /* padding for alignment */
- struct alt_table alt_trk; /* bad track table */
- struct alt_table alt_sec; /* bad sector table */
-};
-typedef struct alt_info altinfo_t;
+#ifndef _DISK_H_
+#define _DISK_H_
#define V_NUMPAR 16 /* maximum number of partitions */
#define VTOC_SANE 0x600DDEEE /* Indicates a sane VTOC */
#define PDLOCATION 29 /* location of VTOC */
-#define BAD_BLK 0x80 /* needed for V_VERIFY */
-/* BAD_BLK moved from old hdreg.h */
-
-
-#define HDPDLOC 29 /* location of pdinfo/vtoc */
#define LBLLOC 1 /* label block for xxxbsd */
-/* Partition permission flags */
-#define V_OPEN 0x100 /* Partition open (for driver use) */
-#define V_VALID 0x200 /* Partition is valid to use */
-
-
-
-/* Sanity word for the physical description area */
-#define VALID_PD 0xCA5E600D
-
struct localpartition {
u_int p_flag; /*permision flags*/
long p_start; /*physical start sector no of partition*/
@@ -123,64 +86,4 @@ struct evtoc {
char fill[512-352];
};
-union io_arg {
- struct {
- u_short ncyl; /* number of cylinders on drive */
- u_char nhead; /* number of heads/cyl */
- u_char nsec; /* number of sectors/track */
- u_short secsiz; /* number of bytes/sector */
- } ia_cd; /* used for Configure Drive cmd */
- struct {
- u_short flags; /* flags (see below) */
- long bad_sector; /* absolute sector number */
- long new_sector; /* RETURNED alternate sect assigned */
- } ia_abs; /* used for Add Bad Sector cmd */
- struct {
- u_short start_trk; /* first track # */
- u_short num_trks; /* number of tracks to format */
- u_short intlv; /* interleave factor */
- } ia_fmt; /* used for Format Tracks cmd */
- struct {
- u_short start_trk; /* first track */
- char *intlv_tbl; /* interleave table */
- } ia_xfmt; /* used for the V_XFORMAT ioctl */
-};
-
-
-#define BOOTSZ 446 /* size of boot code in master boot block */
-#define FD_NUMPART 4 /* number of 'partitions' in fdisk table */
-#define ACTIVE 128 /* indicator of active partition */
-#define BOOT_MAGIC 0xAA55 /* signature of the boot record */
-#define UNIXOS 99 /* UNIX partition */
-#define BSDOS 165
-#define LINUXSWAP 130
-#define LINUXOS 131
-extern int OS; /* what partition we came from */
-
-/*
- * structure to hold the fdisk partition table
- */
-struct ipart {
- u_char bootid; /* bootable or not */
- u_char beghead; /* beginning head, sector, cylinder */
- u_char begsect; /* begcyl is a 10-bit number. High 2 bits */
- u_char begcyl; /* are in begsect. */
- u_char systid; /* OS type */
- u_char endhead; /* ending head, sector, cylinder */
- u_char endsect; /* endcyl is a 10-bit number. High 2 bits */
- u_char endcyl; /* are in endsect. */
- long relsect; /* first sector relative to start of disk */
- long numsect; /* number of sectors in partition */
-};
-
-/*
- * structure to hold master boot block in physical sector 0 of the disk.
- * Note that partitions stuff can't be directly included in the structure
- * because of lameo '386 compiler alignment design.
- */
-struct mboot { /* master boot block */
- char bootinst[BOOTSZ];
- char parts[FD_NUMPART * sizeof(struct ipart)];
- u_short signature;
-};
-
+#endif /* _DISK_H_ */
diff --git a/i386/i386at/elf.h b/i386/i386at/elf.h
new file mode 100644
index 00000000..26f4d87b
--- /dev/null
+++ b/i386/i386at/elf.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2013 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _X86_ELF_H
+#define _X86_ELF_H
+
+#define ELF_SHT_SYMTAB 2
+#define ELF_SHT_STRTAB 3
+
+struct elf_shdr {
+ unsigned int name;
+ unsigned int type;
+ unsigned int flags;
+ unsigned long addr;
+ unsigned long offset;
+ unsigned int size;
+ unsigned int link;
+ unsigned int info;
+ unsigned int addralign;
+ unsigned int entsize;
+};
+
+#ifdef __LP64__
+
+struct elf_sym {
+ unsigned int name;
+ unsigned char info;
+ unsigned char other;
+ unsigned short shndx;
+ unsigned long value;
+ unsigned long size;
+};
+
+#else /* __LP64__ */
+
+struct elf_sym {
+ unsigned int name;
+ unsigned long value;
+ unsigned long size;
+ unsigned char info;
+ unsigned char other;
+ unsigned short shndx;
+};
+
+#endif /* __LP64__ */
+
+#endif /* _X86_ELF_H */
diff --git a/i386/i386at/grub_glue.c b/i386/i386at/grub_glue.c
new file mode 100644
index 00000000..68a4cb1f
--- /dev/null
+++ b/i386/i386at/grub_glue.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2014 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <kern/printf.h>
+#include <stdarg.h>
+#include <i386/vm_param.h>
+
+#include <grub/glue.h>
+#include <grub/acpi.h>
+
+#define GRUB_DEBUG 0
+
+void
+grub_real_dprintf (const char *file, const int line, const char *condition,
+ const char *fmt, ...)
+{
+#if GRUB_DEBUG
+ va_list listp;
+ va_start(listp, fmt);
+ vprintf (fmt, listp);
+ va_end(listp);
+#endif
+}
+
+void
+grub_millisleep (grub_uint32_t ms)
+{
+ /* Do nothing. */
+}
+
+struct grub_acpi_rsdp_v20 *
+grub_acpi_get_rsdpv2 (void)
+{
+ return grub_machine_acpi_get_rsdpv2 ();
+}
+
+struct grub_acpi_rsdp_v10 *
+grub_acpi_get_rsdpv1 (void)
+{
+ return grub_machine_acpi_get_rsdpv1 ();
+}
+
+/* Simple checksum by summing all bytes. Used by ACPI and SMBIOS. */
+grub_uint8_t
+grub_byte_checksum (void *base, grub_size_t size)
+{
+ grub_uint8_t *ptr;
+ grub_uint8_t ret = 0;
+ for (ptr = (grub_uint8_t *) base; ptr < ((grub_uint8_t *) base) + size;
+ ptr++)
+ ret += *ptr;
+ return ret;
+}
diff --git a/i386/i386at/i8250.h b/i386/i386at/i8250.h
index fa81173e..9b8a8019 100644
--- a/i386/i386at/i8250.h
+++ b/i386/i386at/i8250.h
@@ -49,6 +49,9 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
* Header file for i8250 chip
*/
+#ifndef _I8250_H_
+#define _I8250_H_
+
/* port offsets from the base i/o address */
#define RDAT 0
@@ -127,3 +130,5 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#define BCNT2400 0x30
#define BCNT4800 0x18
#define BCNT9600 0x0c
+
+#endif /* _I8250_H_ */
diff --git a/i386/i386at/idt.h b/i386/i386at/idt.h
index 1b3284fa..56e6296c 100644
--- a/i386/i386at/idt.h
+++ b/i386/i386at/idt.h
@@ -36,6 +36,6 @@
#ifndef __ASSEMBLER__
extern void idt_init (void);
-#endif
+#endif /* __ASSEMBLER__ */
#endif /* _I386AT_IDT_ */
diff --git a/i386/i386at/immc.c b/i386/i386at/immc.c
index db64620e..bd61522d 100644
--- a/i386/i386at/immc.c
+++ b/i386/i386at/immc.c
@@ -21,8 +21,11 @@
* Author: Bryan Ford, University of Utah CSL
*/
-#ifdef ENABLE_IMMEDIATE_CONSOLE
+#if ENABLE_IMMEDIATE_CONSOLE
+#include <device/cons.h>
+#include <mach/boolean.h>
+#include <i386/vm_param.h>
#include <string.h>
/* This is a special "feature" (read: kludge)
@@ -33,24 +36,67 @@
so it can be used to debug things that happen very early
before any devices are initialized. */
-int immediate_console_enable = 1;
+boolean_t immediate_console_enable = TRUE;
-void
-immc_cnputc(unsigned char c)
+/*
+ * XXX we assume that pcs *always* have a console
+ */
+int
+immc_cnprobe(struct consdev *cp)
+{
+ int maj, unit, pri;
+
+ maj = 0;
+ unit = 0;
+ pri = CN_INTERNAL;
+
+ cp->cn_dev = makedev(maj, unit);
+ cp->cn_pri = pri;
+ return 0;
+}
+
+int
+immc_cninit(struct consdev *cp)
+{
+ return 0;
+}
+
+int immc_cnmaygetc(void)
+{
+ return -1;
+}
+
+int
+immc_cngetc(dev_t dev, int wait)
+{
+ if (wait) {
+ int c;
+ while ((c = immc_cnmaygetc()) < 0)
+ continue;
+ return c;
+ }
+ else
+ return immc_cnmaygetc();
+}
+
+int
+immc_cnputc(dev_t dev, int c)
{
static int ofs = -1;
if (!immediate_console_enable)
- return;
- if (ofs < 0)
+ return -1;
+ if (ofs < 0 || ofs >= 80)
{
ofs = 0;
- immc_cnputc('\n');
+ immc_cnputc(dev, '\n');
}
- else if (c == '\n')
+
+ if (c == '\n')
{
- memmove(0xb8000, 0xb8000+80*2, 80*2*24);
- memset(0xb8000+80*2*24, 0, 80*2);
+ memmove((void *) phystokv(0xb8000),
+ (void *) phystokv(0xb8000+80*2), 80*2*24);
+ memset((void *) phystokv((0xb8000+80*2*24)), 0, 80*2);
ofs = 0;
}
else
@@ -59,20 +105,22 @@ immc_cnputc(unsigned char c)
if (ofs >= 80)
{
- immc_cnputc('\r');
- immc_cnputc('\n');
+ immc_cnputc(dev, '\r');
+ immc_cnputc(dev, '\n');
}
- p = (void*)0xb8000 + 80*2*24 + ofs*2;
+ p = (void *) phystokv(0xb8000 + 80*2*24 + ofs*2);
p[0] = c;
p[1] = 0x0f;
ofs++;
}
+ return 0;
}
-int immc_cnmaygetc(void)
+void
+immc_romputc(char c)
{
- return -1;
+ immc_cnputc (0, c);
}
#endif /* ENABLE_IMMEDIATE_CONSOLE */
diff --git a/i386/i386at/immc.h b/i386/i386at/immc.h
new file mode 100644
index 00000000..dc802c84
--- /dev/null
+++ b/i386/i386at/immc.h
@@ -0,0 +1,31 @@
+/* Declarations for the immediate console.
+
+ Copyright (C) 2015 Free Software Foundation, Inc.
+
+ This file is part of the GNU Mach.
+
+ The GNU Mach is free software; you can redistribute it and/or
+ modify it under the terms of the GNU General Public License as
+ published by the Free Software Foundation; either version 2, or (at
+ your option) any later version.
+
+ The GNU Mach is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with the GNU Mach. If not, see <http://www.gnu.org/licenses/>. */
+
+#ifndef _IMMC_H_
+#define _IMMC_H_
+
+#include <sys/types.h>
+
+int immc_cnprobe(struct consdev *cp);
+int immc_cninit(struct consdev *cp);
+int immc_cngetc(dev_t dev, int wait);
+int immc_cnputc(dev_t dev, int c);
+void immc_romputc(char c);
+
+#endif /* _IMMC_H_ */
diff --git a/i386/i386at/int_init.c b/i386/i386at/int_init.c
index 0f00b868..43daad8b 100644
--- a/i386/i386at/int_init.c
+++ b/i386/i386at/int_init.c
@@ -27,7 +27,7 @@
/* defined in locore.S */
extern vm_offset_t int_entry_table[];
-void int_init()
+void int_init(void)
{
int i;
diff --git a/i386/i386at/int_init.h b/i386/i386at/int_init.h
index f4abef0b..f9b03b74 100644
--- a/i386/i386at/int_init.h
+++ b/i386/i386at/int_init.h
@@ -29,6 +29,6 @@
#ifndef __ASSEMBLER__
extern void int_init (void);
-#endif
+#endif /* __ASSEMBLER__ */
#endif /* _INT_INIT_H_ */
diff --git a/i386/i386at/kd.c b/i386/i386at/kd.c
index c9778629..9ed3958a 100644
--- a/i386/i386at/kd.c
+++ b/i386/i386at/kd.c
@@ -83,7 +83,7 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#include <device/conf.h>
#include <device/tty.h>
#include <device/io_req.h>
-#include <device/buf.h> /* for struct uio (!) */
+#include <device/buf.h>
#include <vm/vm_kern.h>
#include <i386/locore.h>
#include <i386/loose_ends.h>
@@ -100,20 +100,15 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#define DEBUG 1 /* export feep() */
-void kd_enqsc(); /* enqueues a scancode */
-
#if 0
#define BROKEN_KEYBOARD_RESET
#endif
struct tty kd_tty;
-extern int rebootflag;
+extern boolean_t rebootflag;
static void charput(), charmvup(), charmvdown(), charclear(), charsetcursor();
-static void kd_noopreset();
-boolean_t kdcheckmagic();
-
-int do_modifier (int, Scancode, boolean_t);
+static void kd_noopreset(void);
/*
* These routines define the interface to the device-specific layer.
@@ -127,10 +122,6 @@ void (*kd_dsetcursor)() = charsetcursor;
/* set cursor position on displayed page */
void (*kd_dreset)() = kd_noopreset; /* prepare for reboot */
-/* forward declarations */
-unsigned char kd_getdata(), state2leds();
-
-
/*
* Globals used for both character-based controllers and bitmap-based
* controllers. Default is EGA.
@@ -213,7 +204,7 @@ u_char *esc_spt = (u_char *)0;
- Delete returns `ESC [ 9' instead of 0x7f.
- Alt + function keys return key sequences that are different
from the key sequences returned by the function keys alone.
- This is done with the idea of alowing a terminal server to
+ This is done with the idea of allowing a terminal server to
implement multiple virtual consoles mapped on Alt+F1, Alt+F2,
etc, as in Linux.
@@ -249,8 +240,7 @@ unsigned char key_map[NUMKEYS][WIDTH_KMAP] = {
{K_LBRKT,NC,NC, K_LBRACE,NC,NC, K_ESC,NC,NC, 0x1b,K_LBRKT,NC, 0x1b,0x4e,K_LBRACE},
{K_RBRKT,NC,NC, K_RBRACE,NC,NC, K_GS,NC,NC, 0x1b,K_RBRKT,NC, 0x1b,0x4e,K_RBRACE},
{K_CR,NC,NC, K_CR,NC,NC, K_CR,NC,NC, 0x1b,K_CR,NC, K_CR,NC,NC},
-{K_SCAN,K_CTLSC,NC, K_SCAN,K_CTLSC,NC, K_SCAN,K_CTLSC,NC, K_SCAN,K_CTLSC,NC,
- K_SCAN,K_CTLSC,NC},
+{K_SCAN,K_CTLSC,NC, K_SCAN,K_CTLSC,NC, K_SCAN,K_CTLSC,NC, K_SCAN,K_CTLSC,NC, K_SCAN,K_CTLSC,NC},
{K_a,NC,NC, K_A,NC,NC, K_SOH,NC,NC, 0x1b,K_a,NC, 0x1b,0x4e,K_A},
{K_s,NC,NC, K_S,NC,NC, K_DC3,NC,NC, 0x1b,K_s,NC, 0x1b,0x4e,K_S},
{K_d,NC,NC, K_D,NC,NC, K_EOT,NC,NC, 0x1b,K_d,NC, 0x1b,0x4e,K_D},
@@ -263,8 +253,7 @@ unsigned char key_map[NUMKEYS][WIDTH_KMAP] = {
{K_SEMI,NC,NC, K_COLON,NC,NC, K_SEMI,NC,NC, 0x1b,K_SEMI,NC, 0x1b,0x4e,K_COLON},
{K_SQUOTE,NC,NC,K_DQUOTE,NC,NC, K_SQUOTE,NC,NC,0x1b,K_SQUOTE,NC, 0x1b,0x4e,K_DQUOTE},
{K_GRAV,NC,NC, K_TILDE,NC,NC, K_RS,NC,NC, 0x1b,K_GRAV,NC, 0x1b,0x4e,K_TILDE},
-{K_SCAN,K_LSHSC,NC, K_SCAN,K_LSHSC,NC, K_SCAN,K_LSHSC,NC, K_SCAN,K_LSHSC,NC,
- K_SCAN,K_LSHSC,NC},
+{K_SCAN,K_LSHSC,NC, K_SCAN,K_LSHSC,NC, K_SCAN,K_LSHSC,NC, K_SCAN,K_LSHSC,NC, K_SCAN,K_LSHSC,NC},
{K_BSLSH,NC,NC, K_PIPE,NC,NC, K_FS,NC,NC, 0x1b,K_BSLSH,NC, 0x1b,0x4e,K_PIPE},
{K_z,NC,NC, K_Z,NC,NC, K_SUB,NC,NC, 0x1b,K_z,NC, 0x1b,0x4e,K_Z},
{K_x,NC,NC, K_X,NC,NC, K_CAN,NC,NC, 0x1b,K_x,NC, 0x1b,0x4e,K_X},
@@ -276,14 +265,11 @@ unsigned char key_map[NUMKEYS][WIDTH_KMAP] = {
{K_COMMA,NC,NC, K_LTHN,NC,NC, K_COMMA,NC,NC, 0x1b,K_COMMA,NC, 0x1b,0x4e,K_LTHN},
{K_PERIOD,NC,NC,K_GTHN,NC,NC, K_PERIOD,NC,NC,0x1b,K_PERIOD,NC, 0x1b,0x4e,K_GTHN},
{K_SLASH,NC,NC, K_QUES,NC,NC, K_SLASH,NC,NC, 0x1b,K_SLASH,NC, 0x1b,0x4e,K_QUES},
-{K_SCAN,K_RSHSC,NC, K_SCAN,K_RSHSC,NC, K_SCAN,K_RSHSC,NC, K_SCAN,K_RSHSC,NC,
- K_SCAN,K_RSHSC,NC},
+{K_SCAN,K_RSHSC,NC, K_SCAN,K_RSHSC,NC, K_SCAN,K_RSHSC,NC, K_SCAN,K_RSHSC,NC, K_SCAN,K_RSHSC,NC},
{K_ASTER,NC,NC, K_ASTER,NC,NC, K_ASTER,NC,NC, 0x1b,K_ASTER,NC, 0x1b,0x4e,K_ASTER},
-{K_SCAN,K_ALTSC,NC, K_SCAN,K_ALTSC,NC, K_SCAN,K_ALTSC,NC, K_SCAN,K_ALTSC,NC,
- K_SCAN,K_ALTSC,NC},
+{K_SCAN,K_ALTSC,NC, K_SCAN,K_ALTSC,NC, K_SCAN,K_ALTSC,NC, K_SCAN,K_ALTSC,NC, K_SCAN,K_ALTSC,NC},
{K_SPACE,NC,NC, K_SPACE,NC,NC, K_NUL,NC,NC, 0x1b,K_SPACE,NC, K_SPACE,NC,NC},
-{K_SCAN,K_CLCKSC,NC, K_SCAN,K_CLCKSC,NC, K_SCAN,K_CLCKSC,NC,
- K_SCAN,K_CLCKSC,NC, K_SCAN,K_CLCKSC,NC},
+{K_SCAN,K_CLCKSC,NC, K_SCAN,K_CLCKSC,NC, K_SCAN,K_CLCKSC,NC, K_SCAN,K_CLCKSC,NC, K_SCAN,K_CLCKSC,NC},
{K_F1, K_F1S, K_F1, K_F1A, K_F1S},
{K_F2, K_F2S, K_F2, K_F2A, K_F2S},
{K_F3, K_F3S, K_F3, K_F3A, K_F3S},
@@ -294,20 +280,16 @@ unsigned char key_map[NUMKEYS][WIDTH_KMAP] = {
{K_F8, K_F8S, K_F8, K_F8A, K_F8S},
{K_F9, K_F9S, K_F9, K_F9A, K_F9S},
{K_F10, K_F10S, K_F10, K_F10A, K_F10S},
-{K_SCAN,K_NLCKSC,NC, K_SCAN,K_NLCKSC,NC, K_SCAN,K_NLCKSC,NC,
- K_SCAN,K_NLCKSC,NC, K_SCAN,K_NLCKSC,NC},
+{K_SCAN,K_NLCKSC,NC, K_SCAN,K_NLCKSC,NC, K_SCAN,K_NLCKSC,NC, K_SCAN,K_NLCKSC,NC, K_SCAN,K_NLCKSC,NC},
{K_SCRL, K_NUL,NC,NC, K_SCRL, K_SCRL, K_NUL,NC,NC},
{K_HOME, K_SEVEN,NC,NC, K_HOME, K_HOME, 0x1b,0x4e,K_SEVEN},
{K_UA, K_EIGHT,NC,NC, K_UA, K_UA, 0x1b,0x4e,K_EIGHT},
{K_PUP, K_NINE,NC,NC, K_PUP, K_PUP, 0x1b,0x4e,K_NINE},
-{0x1b,0x5b,0x53, K_MINUS,NC,NC, 0x1b,0x5b,0x53, 0x1b,0x5b,0x53,
- 0x1b,0x4e,0x2d},
+{0x1b,0x5b,0x53, K_MINUS,NC,NC, 0x1b,0x5b,0x53, 0x1b,0x5b,0x53, 0x1b,0x4e,0x2d},
{K_LA, K_FOUR,NC,NC, K_LA, K_LA, 0x1b,0x4e,K_FOUR},
-{0x1b,0x5b,0x47, K_FIVE,NC,NC, 0x1b,0x5b,0x47, 0x1b,0x5b,0x47,
- 0x1b,0x4e,0x35},
+{0x1b,0x5b,0x47, K_FIVE,NC,NC, 0x1b,0x5b,0x47, 0x1b,0x5b,0x47, 0x1b,0x4e,0x35},
{K_RA, K_SIX,NC,NC, K_RA, K_RA, 0x1b,0x4e,K_SIX},
-{0x1b,0x5b,0x54, K_PLUS,NC,NC, 0x1b,0x5b,0x54, 0x1b,0x5b,0x54,
- 0x1b,0x4e,0x2b},
+{0x1b,0x5b,0x54, K_PLUS,NC,NC, 0x1b,0x5b,0x54, 0x1b,0x5b,0x54, 0x1b,0x4e,0x2b},
{K_END, K_ONE,NC,NC, K_END, K_END, 0x1b,0x4e,K_ONE},
{K_DA, K_TWO,NC,NC, K_DA, K_DA, 0x1b,0x4e,K_TWO},
{K_PDN, K_THREE,NC,NC, K_PDN, K_PDN, 0x1b,0x4e,K_THREE},
@@ -367,7 +349,7 @@ int kd_pollc = 0;
* Warning: uses outb(). You may prefer to use kd_debug_put.
*/
void
-feep()
+feep(void)
{
int i;
@@ -378,7 +360,7 @@ feep()
}
void
-pause()
+pause(void)
{
int i;
@@ -392,9 +374,9 @@ pause()
* one column to the left, etc.
*/
void
-kd_debug_put(loc, c)
-int loc;
-char c;
+kd_debug_put(
+ int loc,
+ char c)
{
csrpos_t pos = ONE_PAGE - (loc+1) * ONE_SPACE;
@@ -403,12 +385,11 @@ char c;
#endif /* DEBUG */
-extern int mouse_in_use;
-int old_kb_mode;
+extern boolean_t mouse_in_use;
+int old_kb_mode;
void
-cnpollc(on)
-boolean_t on;
+cnpollc(boolean_t on)
{
if (mouse_in_use) {
if (on) {
@@ -449,15 +430,13 @@ boolean_t on;
*
*/
int
-kdopen(dev, flag, ior)
- dev_t dev;
- int flag;
- io_req_t ior;
+kdopen(
+ dev_t dev,
+ int flag,
+ io_req_t ior)
{
struct tty *tp;
- void kdstart();
spl_t o_pri;
- void kdstop();
tp = &kd_tty;
o_pri = spltty();
@@ -498,7 +477,7 @@ kdopen(dev, flag, ior)
/*ARGSUSED*/
void
kdclose(dev, flag)
-int dev;
+dev_t dev;
int flag;
{
struct tty *tp;
@@ -530,8 +509,8 @@ int flag;
/*ARGSUSED*/
int
kdread(dev, uio)
-int dev;
-struct uio *uio;
+dev_t dev;
+io_req_t uio;
{
struct tty *tp;
@@ -555,8 +534,8 @@ struct uio *uio;
/*ARGSUSED*/
int
kdwrite(dev, uio)
-int dev;
-struct uio *uio;
+dev_t dev;
+io_req_t uio;
{
return((*linesw[kd_tty.t_line].l_write)(&kd_tty, uio));
}
@@ -569,10 +548,10 @@ struct uio *uio;
int
kdmmap(dev, off, prot)
dev_t dev;
- off_t off;
- int prot;
+ vm_offset_t off;
+ vm_prot_t prot;
{
- if ((u_int) off >= (128*1024))
+ if (off >= (128*1024))
return(-1);
/* Get page frame number for the page to be mapped. */
@@ -580,19 +559,19 @@ kdmmap(dev, off, prot)
}
int
-kdportdeath(dev, port)
- dev_t dev;
- mach_port_t port;
+kdportdeath(
+ dev_t dev,
+ mach_port_t port)
{
return (tty_portdeath(&kd_tty, (ipc_port_t)port));
}
/*ARGSUSED*/
-io_return_t kdgetstat(dev, flavor, data, count)
- dev_t dev;
- int flavor;
- int * data; /* pointer to OUT array */
- natural_t *count; /* OUT */
+io_return_t kdgetstat(
+ dev_t dev,
+ int flavor,
+ int * data, /* pointer to OUT array */
+ natural_t *count) /* OUT */
{
io_return_t result;
@@ -618,11 +597,11 @@ io_return_t kdgetstat(dev, flavor, data, count)
}
/*ARGSUSED*/
-io_return_t kdsetstat(dev, flavor, data, count)
- dev_t dev;
- int flavor;
- int * data;
- natural_t count;
+io_return_t kdsetstat(
+ dev_t dev,
+ int flavor,
+ int * data,
+ natural_t count)
{
io_return_t result;
@@ -655,13 +634,12 @@ io_return_t kdsetstat(dev, flavor, data, count)
* on/off value.
*/
int
-kdsetbell(val, flags)
-int val; /* on or off */
-int flags; /* flags set for console */
+kdsetbell(
+ int val, /* on or off */
+ int flags) /* flags set for console */
{
int err = 0;
-
if (val == KD_BELLON)
kd_bellon();
else if (val == KD_BELLOFF)
@@ -672,15 +650,13 @@ int flags; /* flags set for console */
return(err);
}
-
/*
* kdgetkbent:
*
* Get entry from key mapping table. Returns error code, if any.
*/
int
-kdgetkbent(kbent)
-struct kbentry * kbent;
+kdgetkbent(struct kbentry *kbent)
{
u_char *cp;
spl_t o_pri = SPLKD(); /* probably superfluous */
@@ -700,9 +676,9 @@ struct kbentry * kbent;
* Set entry in key mapping table. Return error code, if any.
*/
int
-kdsetkbent(kbent, flags)
-struct kbentry * kbent;
-int flags; /* flags set for console */
+kdsetkbent(
+ struct kbentry *kbent,
+ int flags) /* flags set for console */
{
u_char *cp;
spl_t o_pri;
@@ -732,15 +708,14 @@ int flags; /* flags set for console */
*/
/*ARGSUSED*/
void
-kdintr(vec)
-int vec;
+kdintr(int vec)
{
struct tty *tp;
unsigned char c;
unsigned char scancode;
- int char_idx;
+ unsigned int char_idx;
boolean_t up = FALSE; /* key-up event */
- extern int mouse_in_use;
+
if (kd_pollc)
return; /* kdb polling kbd */
@@ -749,7 +724,8 @@ int vec;
tp = &kd_tty;
#ifdef old
- while ((inb(K_STATUS) & K_OBUF_FUL) == 0); /* this should never loop */
+ while ((inb(K_STATUS) & K_OBUF_FUL) == 0)
+ ; /* this should never loop */
#else /* old */
{
/*
@@ -818,7 +794,7 @@ int vec;
set_kd_state(do_modifier(kd_state, c, up));
} else if (!up) {
/* regular key-down */
- int max; /* max index for char sequence */
+ unsigned int max; /* max index for char sequence */
max = char_idx + NUMOUTPUT;
char_idx++;
@@ -873,7 +849,7 @@ int vec;
* drop the ack on the floor.
*/
void
-kd_handle_ack()
+kd_handle_ack(void)
{
switch (kd_ack) {
case SET_LEDS:
@@ -898,7 +874,7 @@ kd_handle_ack()
* Resend a missed keyboard command or data byte.
*/
void
-kd_resend()
+kd_resend(void)
{
if (kd_ack == NOT_WAITING)
printf("unexpected RESEND from keyboard\n");
@@ -919,10 +895,10 @@ kd_resend()
* output: the new state
*/
int
-do_modifier(state, c, up)
-int state;
-Scancode c;
-boolean_t up;
+do_modifier(
+ int state,
+ Scancode c,
+ boolean_t up)
{
switch (c) {
case (K_ALTSC):
@@ -984,12 +960,10 @@ boolean_t up;
* are still held down.
*/
boolean_t
-kdcheckmagic(scancode)
-Scancode scancode;
+kdcheckmagic(Scancode scancode)
{
static int magic_state = KS_NORMAL; /* like kd_state */
boolean_t up = FALSE;
- extern int rebootflag;
if (scancode == 0x46) /* scroll lock */
/* if (scancode == 0x52) ** insert key */
@@ -1040,9 +1014,9 @@ Scancode scancode;
* Return the value for the 2nd index into key_map that
* corresponds to the given state.
*/
-int
+unsigned int
kdstate2idx(state, extended)
-int state; /* bit vector, not a state index */
+unsigned int state; /* bit vector, not a state index */
boolean_t extended;
{
int state_idx = NORM_STATE;
@@ -1081,12 +1055,10 @@ boolean_t extended;
* ASSUMES that it is never called from interrupt-driven code.
*/
void
-kdstart(tp)
-struct tty *tp;
+kdstart(struct tty *tp)
{
spl_t o_pri;
int ch;
- unsigned char c;
if (tp->t_state & TS_TTSTOP)
return;
@@ -1096,33 +1068,12 @@ struct tty *tp;
break;
if ((tp->t_outq.c_cc <= 0) || (ch = getc(&tp->t_outq)) == -1)
break;
- c = ch;
/*
* Drop priority for long screen updates. ttstart() calls us at
* spltty.
*/
o_pri = splsoftclock(); /* block timeout */
- if (c == (K_ESC)) {
- if (esc_spt == esc_seq) {
- *(esc_spt++)=(K_ESC);
- *(esc_spt) = '\0';
- } else {
- kd_putc((K_ESC));
- esc_spt = esc_seq;
- }
- } else {
- if (esc_spt - esc_seq) {
- if (esc_spt - esc_seq > K_MAXESC - 1)
- esc_spt = esc_seq;
- else {
- *(esc_spt++) = c;
- *(esc_spt) = '\0';
- kd_parseesc();
- }
- } else {
- kd_putc(c);
- }
- }
+ kd_putc_esc(ch);
splx(o_pri);
}
if (tp->t_outq.c_cc <= TTLOWAT(tp)) {
@@ -1132,9 +1083,9 @@ struct tty *tp;
/*ARGSUSED*/
void
-kdstop(tp, flags)
- register struct tty *tp;
- int flags;
+kdstop(
+ struct tty *tp,
+ int flags)
{
/*
* do nothing - all characters are output by one call to
@@ -1159,9 +1110,8 @@ kdstop(tp, flags)
*
*/
void
-kdinit()
+kdinit(void)
{
- void kd_xga_init();
unsigned char k_comm; /* keyboard command byte */
if (kd_initialized)
@@ -1190,19 +1140,19 @@ kdinit()
kd_senddata(k_comm);
kd_initialized = TRUE;
-#ifdef ENABLE_IMMEDIATE_CONSOLE
+#if ENABLE_IMMEDIATE_CONSOLE
/* Now that we're set up, we no longer need or want the
immediate console. */
{
- extern int immediate_console_enable;
- immediate_console_enable = 0;
+ extern boolean_t immediate_console_enable;
+ immediate_console_enable = FALSE;
}
/* The immediate console printed stuff at the bottom of the
screen rather than at the cursor position, so that's where
we should start. */
kd_setpos(ONE_PAGE - ONE_LINE); printf("\n");
-#endif
+#endif /* ENABLE_IMMEDIATE_CONSOLE */
cnsetleds(kd_state = KS_NORMAL);
/* clear the LEDs AFTER we
@@ -1224,7 +1174,7 @@ kdinit()
* output : bell is turned off
*
*/
-static unsigned int kd_bellstate = 0;
+static boolean_t kd_bellstate = FALSE;
void
kd_belloff(void * param)
@@ -1233,7 +1183,7 @@ kd_belloff(void * param)
status = (inb(K_PORTB) & ~(K_SPKRDATA | K_ENABLETMR2));
outb(K_PORTB, status);
- kd_bellstate = 0;
+ kd_bellstate = FALSE;
return;
}
@@ -1248,7 +1198,7 @@ kd_belloff(void * param)
*
*/
void
-kd_bellon()
+kd_bellon(void)
{
unsigned char status;
@@ -1265,6 +1215,43 @@ kd_bellon()
/*
*
+ * Function kd_putc_esc():
+ *
+ * This function puts a character on the screen, handling escape
+ * sequences.
+ *
+ * input : character to be displayed (or part of an escape code)
+ * output : character is displayed, or some action is taken
+ *
+ */
+void
+kd_putc_esc(u_char c)
+{
+ if (c == (K_ESC)) {
+ if (esc_spt == esc_seq) {
+ *(esc_spt++)=(K_ESC);
+ *(esc_spt) = '\0';
+ } else {
+ kd_putc((K_ESC));
+ esc_spt = esc_seq;
+ }
+ } else {
+ if (esc_spt - esc_seq) {
+ if (esc_spt - esc_seq > K_MAXESC - 1)
+ esc_spt = esc_seq;
+ else {
+ *(esc_spt++) = c;
+ *(esc_spt) = '\0';
+ kd_parseesc();
+ }
+ } else {
+ kd_putc(c);
+ }
+ }
+}
+
+/*
+ *
* Function kd_putc():
*
* This function simply puts a character on the screen. It does some
@@ -1278,8 +1265,7 @@ kd_bellon()
int sit_for_0 = 1;
void
-kd_putc(ch)
-u_char ch;
+kd_putc(u_char ch)
{
if ((!ch) && sit_for_0)
return;
@@ -1306,7 +1292,7 @@ u_char ch;
{
kd_bellon();
timeout(kd_belloff, 0, hz/8 );
- kd_bellstate = 1;
+ kd_bellstate = TRUE;
}
break;
default:
@@ -1332,8 +1318,7 @@ u_char ch;
*
*/
void
-kd_setpos(newpos)
-csrpos_t newpos;
+kd_setpos(csrpos_t newpos)
{
if (newpos > ONE_PAGE) {
kd_scrollup();
@@ -1359,7 +1344,7 @@ csrpos_t newpos;
*
*/
void
-kd_scrollup()
+kd_scrollup(void)
{
csrpos_t to;
csrpos_t from;
@@ -1389,7 +1374,7 @@ kd_scrollup()
*
*/
void
-kd_scrolldn()
+kd_scrolldn(void)
{
csrpos_t to;
csrpos_t from;
@@ -1423,7 +1408,7 @@ kd_scrolldn()
*
*/
void
-kd_parseesc()
+kd_parseesc(void)
{
u_char *escp;
@@ -1495,8 +1480,7 @@ unsigned char color_table[] = { 0, 4, 2, 6, 1, 5, 3, 7,
*
*/
void
-kd_parserest(cp)
-u_char *cp;
+kd_parserest(u_char *cp)
{
int number[16], npar = 0, i;
csrpos_t newpos;
@@ -1760,7 +1744,7 @@ u_char *cp;
}
void
-kd_tab()
+kd_tab(void)
{
int i;
@@ -1781,7 +1765,7 @@ kd_tab()
*
*/
void
-kd_cls()
+kd_cls(void)
{
(*kd_dclear)(0, ONE_PAGE/ONE_SPACE, kd_attr);
return;
@@ -1799,7 +1783,7 @@ kd_cls()
*
*/
void
-kd_home()
+kd_home(void)
{
kd_setpos(0);
return;
@@ -1816,7 +1800,7 @@ kd_home()
*
*/
void
-kd_up()
+kd_up(void)
{
if (kd_curpos < ONE_LINE)
kd_scrolldn();
@@ -1836,7 +1820,7 @@ kd_up()
*
*/
void
-kd_down()
+kd_down(void)
{
if (kd_curpos >= (ONE_PAGE - ONE_LINE))
kd_scrollup();
@@ -1856,7 +1840,7 @@ kd_down()
*
*/
void
-kd_right()
+kd_right(void)
{
if (kd_curpos < (ONE_PAGE - ONE_SPACE))
kd_setpos(kd_curpos + ONE_SPACE);
@@ -1878,7 +1862,7 @@ kd_right()
*
*/
void
-kd_left()
+kd_left(void)
{
if (0 < kd_curpos)
kd_setpos(kd_curpos - ONE_SPACE);
@@ -1897,7 +1881,7 @@ kd_left()
*
*/
void
-kd_cr()
+kd_cr(void)
{
kd_setpos(BEG_OF_LINE(kd_curpos));
return;
@@ -1911,11 +1895,11 @@ kd_cr()
* of the screen.
*
* input : None
- * output : Screen is cleared from current cursor postion to bottom
+ * output : Screen is cleared from current cursor position to bottom
*
*/
void
-kd_cltobcur()
+kd_cltobcur(void)
{
csrpos_t start;
int count;
@@ -1934,11 +1918,11 @@ kd_cltobcur()
* of the screen.
*
* input : None
- * output : Screen is cleared from current cursor postion to top
+ * output : Screen is cleared from current cursor position to top
*
*/
void
-kd_cltopcur()
+kd_cltopcur(void)
{
int count;
@@ -1958,7 +1942,7 @@ kd_cltopcur()
*
*/
void
-kd_cltoecur()
+kd_cltoecur(void)
{
csrpos_t i;
csrpos_t hold;
@@ -1981,7 +1965,7 @@ kd_cltoecur()
*
*/
void
-kd_clfrbcur()
+kd_clfrbcur(void)
{
csrpos_t i;
@@ -2002,8 +1986,7 @@ kd_clfrbcur()
*
*/
void
-kd_delln(number)
-int number;
+kd_delln(int number)
{
csrpos_t to;
csrpos_t from;
@@ -2041,8 +2024,7 @@ int number;
*
*/
void
-kd_insln(number)
-int number;
+kd_insln(int number)
{
csrpos_t to;
csrpos_t from;
@@ -2081,12 +2063,11 @@ int number;
*
*/
void
-kd_delch(number)
-int number;
+kd_delch(int number)
{
- int count; /* num words moved/filled */
- int delbytes; /* bytes to delete */
- register csrpos_t to;
+ int count; /* num words moved/filled */
+ int delbytes; /* bytes to delete */
+ csrpos_t to;
csrpos_t from;
csrpos_t nextline; /* start of next line */
@@ -2123,8 +2104,7 @@ int number;
*
*/
void
-kd_erase(number)
-int number;
+kd_erase(int number)
{
csrpos_t i;
csrpos_t stop;
@@ -2149,7 +2129,7 @@ int number;
*
*/
void
-kd_eraseln()
+kd_eraseln(void)
{
csrpos_t i;
csrpos_t stop;
@@ -2173,8 +2153,7 @@ kd_eraseln()
*
*/
void
-kd_insch(number)
-int number;
+kd_insch(int number)
{
csrpos_t to;
csrpos_t from;
@@ -2215,8 +2194,7 @@ int number;
*
*/
boolean_t
-kd_isupper(c)
-u_char c;
+kd_isupper(u_char c)
{
if (('A' <= c) && (c <= 'Z'))
return(TRUE);
@@ -2224,8 +2202,7 @@ u_char c;
}
boolean_t
-kd_islower(c)
-u_char c;
+kd_islower(u_char c)
{
if (('a' <= c) && (c <= 'z'))
return(TRUE);
@@ -2242,10 +2219,10 @@ u_char c;
*
*/
void
-kd_senddata(ch)
-unsigned char ch;
+kd_senddata(unsigned char ch)
{
- while (inb(K_STATUS) & K_IBUF_FUL);
+ while (inb(K_STATUS) & K_IBUF_FUL)
+ ;
outb(K_RDWR, ch);
last_sent = ch;
return;
@@ -2260,10 +2237,10 @@ unsigned char ch;
*
*/
void
-kd_sendcmd(ch)
-unsigned char ch;
+kd_sendcmd(unsigned char ch)
{
- while (inb(K_STATUS) & K_IBUF_FUL);
+ while (inb(K_STATUS) & K_IBUF_FUL)
+ ;
outb(K_CMD, ch);
return;
}
@@ -2277,41 +2254,47 @@ unsigned char ch;
* read.
*/
unsigned char
-kd_getdata()
+kd_getdata(void)
{
- while ((inb(K_STATUS) & K_OBUF_FUL) == 0);
+ while ((inb(K_STATUS) & K_OBUF_FUL) == 0)
+ ;
return(inb(K_RDWR));
}
unsigned char
-kd_cmdreg_read()
+kd_cmdreg_read(void)
{
int ch=KC_CMD_READ;
- while (inb(K_STATUS) & K_IBUF_FUL);
+ while (inb(K_STATUS) & K_IBUF_FUL)
+ ;
outb(K_CMD, ch);
- while ((inb(K_STATUS) & K_OBUF_FUL) == 0);
+ while ((inb(K_STATUS) & K_OBUF_FUL) == 0)
+ ;
return(inb(K_RDWR));
}
void
-kd_cmdreg_write(val)
+kd_cmdreg_write(int val)
{
int ch=KC_CMD_WRITE;
- while (inb(K_STATUS) & K_IBUF_FUL);
+ while (inb(K_STATUS) & K_IBUF_FUL)
+ ;
outb(K_CMD, ch);
- while (inb(K_STATUS) & K_IBUF_FUL);
+ while (inb(K_STATUS) & K_IBUF_FUL)
+ ;
outb(K_RDWR, val);
}
void
-kd_mouse_drain()
+kd_mouse_drain(void)
{
int i;
- while(inb(K_STATUS) & K_IBUF_FUL);
+ while(inb(K_STATUS) & K_IBUF_FUL)
+ ;
while((i = inb(K_STATUS)) & K_OBUF_FUL)
printf("kbd: S = %x D = %x\n", i, inb(K_RDWR));
}
@@ -2322,8 +2305,7 @@ kd_mouse_drain()
* Set kd_state and update the keyboard status LEDs.
*/
void
-set_kd_state(newstate)
-int newstate;
+set_kd_state(int newstate)
{
kd_state = newstate;
kd_setleds1(state2leds(newstate));
@@ -2336,8 +2318,7 @@ int newstate;
* a state vector.
*/
u_char
-state2leds(state)
-int state;
+state2leds(int state)
{
u_char result = 0;
@@ -2354,8 +2335,7 @@ int state;
* Set the keyboard LEDs according to the given byte.
*/
void
-kd_setleds1(val)
-u_char val;
+kd_setleds1(u_char val)
{
if (kd_ack != NOT_WAITING) {
#ifdef MACH_KBD
@@ -2370,7 +2350,7 @@ u_char val;
}
void
-kd_setleds2()
+kd_setleds2(void)
{
kd_senddata(kd_nextled);
}
@@ -2384,8 +2364,7 @@ kd_setleds2()
* lock anyway.
*/
void
-cnsetleds(val)
-u_char val;
+cnsetleds(u_char val)
{
kd_senddata(K_CMD_LEDS);
(void)kd_getdata(); /* XXX - assume is ACK */
@@ -2394,7 +2373,7 @@ u_char val;
}
void
-kdreboot()
+kdreboot(void)
{
(*kd_dreset)();
@@ -2413,7 +2392,7 @@ static int which_button[] = {0, MOUSE_LEFT, MOUSE_MIDDLE, MOUSE_RIGHT};
static struct mouse_motion moved;
int
-kd_kbd_magic(scancode)
+kd_kbd_magic(int scancode)
{
int new_button = 0;
@@ -2507,14 +2486,18 @@ int new_button = 0;
* Initialization specific to character-based graphics adapters.
*/
void
-kd_xga_init()
+kd_xga_init(void)
{
csrpos_t xga_getpos();
unsigned char screen;
+ unsigned char start, stop;
outb(CMOS_ADDR, CMOS_EB);
screen = inb(CMOS_DATA) & CM_SCRMSK;
switch(screen) {
+ default:
+ printf("kd: unknown screen type, defaulting to EGA\n");
+ /* FALLTHROUGH */
case CM_EGA_VGA:
/*
* Here we'll want to query to bios on the card
@@ -2540,6 +2523,8 @@ kd_xga_init()
addr[i] = 0x00;
}
break;
+#if 0
+ /* XXX: some buggy BIOSes report these... */
case CM_CGA_40:
vid_start = (u_char *)phystokv(CGA_START);
kd_index_reg = CGA_IDX_REG;
@@ -2561,8 +2546,25 @@ kd_xga_init()
kd_lines = 25;
kd_cols = 80;
break;
- default:
- printf("kd: unknown screen type, defaulting to EGA\n");
+#endif
+ }
+
+ outb(kd_index_reg, C_START);
+ start = inb(kd_io_reg);
+ /* Make sure cursor is enabled */
+ start &= ~0x20;
+ outb(kd_io_reg, start);
+ outb(kd_index_reg, C_STOP);
+ stop = inb(kd_io_reg);
+
+ if (!start && !stop)
+ {
+ /* Some firmware seem not to be initializing the cursor size
+ * any more... Try using standard values. */
+ outb(kd_index_reg, C_START);
+ outb(kd_io_reg, 14);
+ outb(kd_index_reg, C_STOP);
+ outb(kd_io_reg, 15);
}
kd_setpos(xga_getpos());
@@ -2580,7 +2582,7 @@ kd_xga_init()
*
*/
csrpos_t
-xga_getpos()
+xga_getpos(void)
{
unsigned char low;
@@ -2683,38 +2685,24 @@ char chattr;
* No-op reset routine for kd_dreset.
*/
static void
-kd_noopreset()
+kd_noopreset(void)
{
}
-
-/*
- * Generic routines for bitmap devices (i.e., assume no hardware
- * assist). Assumes a simple byte ordering (i.e., a byte at a lower
- * address is to the left of the byte at the next higher address).
- * For the 82786, this works anyway if the characters are 2 bytes
- * wide. (more bubble gum and paper clips.)
- *
- * See the comments above about SLAMBPW.
- */
-
-void bmpch2bit(), bmppaintcsr();
-u_char *bit2fbptr();
-
-
/*
* bmpput: Copy a character from the font to the frame buffer.
*/
void
-bmpput(pos, ch, chattr)
-csrpos_t pos;
-char ch, chattr;
+bmpput(
+ csrpos_t pos,
+ char ch,
+ char chattr)
{
short xbit, ybit; /* u/l corner of char pos */
- register u_char *to, *from;
- register short i, j;
+ u_char *to, *from;
+ short i, j;
u_char mask = (chattr == KA_REVERSE ? 0xff : 0);
if ((u_char)ch >= chars_in_font)
@@ -2736,13 +2724,14 @@ char ch, chattr;
* another.
*/
void
-bmpcp1char(from, to)
-csrpos_t from, to;
+bmpcp1char(
+ csrpos_t from,
+ csrpos_t to)
{
short from_xbit, from_ybit;
short to_xbit, to_ybit;
- register u_char *tp, *fp;
- register short i, j;
+ u_char *tp, *fp;
+ short i, j;
bmpch2bit(from, &from_xbit, &from_ybit);
bmpch2bit(to, &to_xbit, &to_ybit);
@@ -2762,9 +2751,10 @@ csrpos_t from, to;
* bmpvmup: Copy a block of character positions upwards.
*/
void
-bmpmvup(from, to, count)
-csrpos_t from, to;
-int count;
+bmpmvup(
+ csrpos_t from,
+ csrpos_t to,
+ int count)
{
short from_xbit, from_ybit;
short to_xbit, to_ybit;
@@ -2797,9 +2787,10 @@ int count;
* bmpmvdown: copy a block of characters down.
*/
void
-bmpmvdown(from, to, count)
-csrpos_t from, to;
-int count;
+bmpmvdown(
+ csrpos_t from,
+ csrpos_t to,
+ int count)
{
short from_xbit, from_ybit;
short to_xbit, to_ybit;
@@ -2835,12 +2826,12 @@ int count;
* bmpclear: clear one or more character positions.
*/
void
-bmpclear(to, count, chattr)
-csrpos_t to; /* 1st char */
-int count; /* num chars */
-char chattr; /* reverse or normal */
+bmpclear(
+ csrpos_t to, /* 1st char */
+ int count, /* num chars */
+ char chattr) /* reverse or normal */
{
- register short i;
+ short i;
u_short clearval;
u_short clearbyte = (chattr == KA_REVERSE ? char_white : char_black);
@@ -2861,8 +2852,7 @@ char chattr; /* reverse or normal */
* bmpsetcursor: update the display and set the logical cursor.
*/
void
-bmpsetcursor(pos)
-csrpos_t pos;
+bmpsetcursor(csrpos_t pos)
{
/* erase old cursor & paint new one */
bmppaintcsr(kd_curpos, char_black);
@@ -2874,13 +2864,13 @@ csrpos_t pos;
* bmppaintcsr: paint cursor bits.
*/
void
-bmppaintcsr(pos, val)
-csrpos_t pos;
-u_char val;
+bmppaintcsr(
+ csrpos_t pos,
+ u_char val)
{
short xbit, ybit;
- register u_char *cp;
- register short line, byte;
+ u_char *cp;
+ short line, byte;
bmpch2bit(pos, &xbit, &ybit);
ybit += char_height; /* position at bottom of line */
@@ -2897,11 +2887,12 @@ u_char val;
* (0, 0) is the upper left corner.
*/
void
-bmpch2bit(pos, xb, yb)
-csrpos_t pos;
-short *xb, *yb; /* x, y bit positions, u/l corner */
+bmpch2bit(
+ csrpos_t pos,
+ short *xb,
+ short *yb) /* x, y bit positions, u/l corner */
{
- register short xch, ych;
+ short xch, ych;
xch = (pos / ONE_SPACE) % kd_cols;
ych = pos / (ONE_SPACE * kd_cols);
@@ -2916,8 +2907,9 @@ short *xb, *yb; /* x, y bit positions, u/l corner */
* byte.
*/
u_char *
-bit2fbptr(xb, yb)
-short xb, yb;
+bit2fbptr(
+ short xb,
+ short yb)
{
return(vid_start + yb * fb_byte_width + xb/8);
}
@@ -2973,7 +2965,7 @@ kdcnputc(dev_t dev, int c)
/* Note that tab is handled in kd_putc */
if (c == '\n')
kd_putc('\r');
- kd_putc(c);
+ kd_putc_esc(c);
return 0;
}
@@ -3052,6 +3044,39 @@ kdcnmaygetc(void)
#ifdef notdef
cnsetleds(state2leds(kd_state));
#endif
+ } else if (! up
+ && c == K_ESC
+ && key_map[scancode][char_idx+1] == 0x5b) {
+ /* As a convenience for the nice
+ people using our debugger, remap
+ some keys to the readline-like
+ shortcuts supported by dde.
+
+ XXX This is a workaround for the
+ limited kernel getchar interface.
+ It is only used by the debugger. */
+ c = key_map[scancode][char_idx+2];
+ switch (c) {
+#define _MAP(A,B,C) (C)
+#define MAP(T) _MAP(T)
+#define CTRL(c) ((c) & 0x1f)
+ case MAP(K_HOME): c = CTRL('a'); break;
+ case MAP(K_UA): c = CTRL('p'); break;
+ case MAP(K_LA): c = CTRL('b'); break;
+ case MAP(K_RA): c = CTRL('f'); break;
+ case MAP(K_DA): c = CTRL('n'); break;
+ case MAP(K_END): c = CTRL('e'); break;
+ /* delete */
+ case 0x39: c = CTRL('d'); break;
+#undef CTRL
+#undef MAP
+#undef _MAP
+ default:
+ /* Retain the old behavior. */
+ c = K_ESC;
+ }
+
+ return(c);
} else if (!up) {
/* regular key-down */
if (c == K_CR)
diff --git a/i386/i386at/kd.h b/i386/i386at/kd.h
index 1d53538b..60cee7e3 100644
--- a/i386/i386at/kd.h
+++ b/i386/i386at/kd.h
@@ -76,7 +76,10 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#include <sys/types.h>
#include <sys/time.h>
#include <device/cons.h>
-
+#include <device/io_req.h>
+#include <device/buf.h>
+#include <device/tty.h>
+#include <i386at/kdsoft.h>
/*
* Where memory for various graphics adapters starts.
@@ -110,6 +113,8 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
/*
* Commands sent to graphics adapter.
*/
+#define C_START 0x0a /* return cursor line start */
+#define C_STOP 0x0b /* return cursor line stop */
#define C_LOW 0x0f /* return low byte of cursor addr */
#define C_HIGH 0x0e /* high byte */
@@ -359,10 +364,10 @@ typedef u_char Scancode;
* Other mappable non-Ascii keys (e.g., "ctrl") are represented by a
* two-byte sequence: K_SCAN, followed by the key's scan code.
*/
-#define K_DONE 0xff /* must be same as NC */
-#define NC 0xff /* No character defined */
+#define K_DONE 0xffu /* must be same as NC */
+#define NC 0xffu /* No character defined */
-#define K_SCAN 0xfe /* followed by scan code */
+#define K_SCAN 0xfeu /* followed by scan code */
/* ascii char set */
#define K_NUL 0x00 /* Null character */
@@ -697,6 +702,7 @@ extern void kd_setleds1 (u_char);
extern void kd_setleds2 (void);
extern void cnsetleds (u_char);
extern void kdreboot (void);
+extern void kd_putc_esc (u_char);
extern void kd_putc (u_char);
extern void kd_parseesc (void);
extern void kd_down (void);
@@ -728,13 +734,14 @@ extern int kdsetbell (int, int);
extern void kd_resend (void);
extern void kd_handle_ack (void);
extern int kd_kbd_magic (int);
-extern int kdstate2idx (int, boolean_t);
+extern unsigned int kdstate2idx (unsigned int, boolean_t);
extern void kd_parserest (u_char *);
extern int kdcnprobe(struct consdev *cp);
extern int kdcninit(struct consdev *cp);
extern int kdcngetc(dev_t dev, int wait);
extern int kdcnmaygetc (void);
extern int kdcnputc(dev_t dev, int c);
+extern void kd_setpos(csrpos_t newpos);
extern void kd_slmwd (void *start, int count, int value);
extern void kd_slmscu (void *from, void *to, int count);
@@ -742,4 +749,53 @@ extern void kd_slmscd (void *from, void *to, int count);
extern void kdintr(int vec);
+#if MACH_KDB
+extern void kdb_kintr(void);
+#endif /* MACH_KDB */
+
+extern int kdopen(dev_t dev, int flag, io_req_t ior);
+extern void kdclose(dev_t dev, int flag);
+extern int kdread(dev_t dev, io_req_t uio);
+extern int kdwrite(dev_t dev, io_req_t uio);
+
+extern io_return_t kdgetstat(
+ dev_t dev,
+ int flavor,
+ int *data,
+ natural_t *count);
+
+extern io_return_t kdsetstat(
+ dev_t dev,
+ int flavor,
+ int * data,
+ natural_t count);
+
+extern int kdportdeath(dev_t dev, mach_port_t port);
+extern int kdmmap(dev_t dev, vm_offset_t off, vm_prot_t prot);
+
+boolean_t kdcheckmagic(Scancode scancode);
+
+int do_modifier(int state, Scancode c, boolean_t up);
+
+/*
+ * Generic routines for bitmap devices (i.e., assume no hardware
+ * assist). Assumes a simple byte ordering (i.e., a byte at a lower
+ * address is to the left of the byte at the next higher address).
+ * For the 82786, this works anyway if the characters are 2 bytes
+ * wide. (more bubble gum and paper clips.)
+ *
+ * See the comments above (in i386at/kd.c) about SLAMBPW.
+ */
+void bmpch2bit(csrpos_t pos, short *xb, short *yb);
+void bmppaintcsr(csrpos_t pos, u_char val);
+u_char *bit2fbptr(short xb, short yb);
+
+unsigned char kd_getdata(void);
+unsigned char state2leds(int state);
+
+void kdstart(struct tty *tp);
+void kdstop(struct tty *tp, int flags);
+
+void kd_xga_init(void);
+
#endif /* _KD_H_ */
diff --git a/i386/i386at/kd_event.c b/i386/i386at/kd_event.c
index 4d2ea008..694c165e 100644
--- a/i386/i386at/kd_event.c
+++ b/i386/i386at/kd_event.c
@@ -83,11 +83,6 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
kd_event_queue kbd_queue; /* queue of keyboard events */
queue_head_t kbd_read_queue = { &kbd_read_queue, &kbd_read_queue };
-
-void kbd_enqueue();
-io_return_t X_kdb_enter_init();
-io_return_t X_kdb_exit_init();
-
static boolean_t initialized = FALSE;
@@ -96,7 +91,7 @@ static boolean_t initialized = FALSE;
*/
void
-kbdinit()
+kbdinit(void)
{
spl_t s = SPLKD();
@@ -115,9 +110,10 @@ kbdinit()
/*ARGSUSED*/
int
-kbdopen(dev, flags)
+kbdopen(dev, flags, ior)
dev_t dev;
int flags;
+ io_req_t ior;
{
spl_t o_pri = spltty();
kdinit();
@@ -135,9 +131,9 @@ kbdopen(dev, flags)
/*ARGSUSED*/
void
-kbdclose(dev, flags)
- dev_t dev;
- int flags;
+kbdclose(
+ dev_t dev,
+ int flags)
{
spl_t s = SPLKD();
@@ -147,11 +143,11 @@ kbdclose(dev, flags)
}
-io_return_t kbdgetstat(dev, flavor, data, count)
- dev_t dev;
- int flavor;
- int * data; /* pointer to OUT array */
- unsigned int *count; /* OUT */
+io_return_t kbdgetstat(
+ dev_t dev,
+ int flavor,
+ int * data, /* pointer to OUT array */
+ unsigned int *count) /* OUT */
{
switch (flavor) {
case KDGKBDTYPE:
@@ -169,11 +165,11 @@ io_return_t kbdgetstat(dev, flavor, data, count)
return (D_SUCCESS);
}
-io_return_t kbdsetstat(dev, flavor, data, count)
- dev_t dev;
- int flavor;
- int * data;
- unsigned int count;
+io_return_t kbdsetstat(
+ dev_t dev,
+ int flavor,
+ int * data,
+ unsigned int count)
{
switch (flavor) {
case KDSKBDMODE:
@@ -187,9 +183,9 @@ io_return_t kbdsetstat(dev, flavor, data, count)
kd_setleds1 (*data);
break;
case K_X_KDB_ENTER:
- return X_kdb_enter_init(data, count);
+ return X_kdb_enter_init((unsigned int *)data, count);
case K_X_KDB_EXIT:
- return X_kdb_exit_init(data, count);
+ return X_kdb_exit_init((unsigned int *)data, count);
default:
return (D_INVALID_OPERATION);
}
@@ -201,16 +197,13 @@ io_return_t kbdsetstat(dev, flavor, data, count)
/*
* kbdread - dequeue and return any queued events.
*/
-
-boolean_t kbd_read_done(); /* forward */
-
int
-kbdread(dev, ior)
- dev_t dev;
- register io_req_t ior;
+kbdread(
+ dev_t dev,
+ io_req_t ior)
{
- register int err, count;
- register spl_t s;
+ int err, count;
+ spl_t s;
/* Check if IO_COUNT is a multiple of the record size. */
if (ior->io_count % sizeof(kd_event) != 0)
@@ -233,7 +226,7 @@ kbdread(dev, ior)
}
count = 0;
while (!kdq_empty(&kbd_queue) && count < ior->io_count) {
- register kd_event *ev;
+ kd_event *ev;
ev = kdq_get(&kbd_queue);
*(kd_event *)(&ior->io_data[count]) = *ev;
@@ -244,11 +237,10 @@ kbdread(dev, ior)
return (D_SUCCESS);
}
-boolean_t kbd_read_done(ior)
- register io_req_t ior;
+boolean_t kbd_read_done(io_req_t ior)
{
- register int count;
- register spl_t s;
+ int count;
+ spl_t s;
s = SPLKD();
if (kdq_empty(&kbd_queue)) {
@@ -260,7 +252,7 @@ boolean_t kbd_read_done(ior)
count = 0;
while (!kdq_empty(&kbd_queue) && count < ior->io_count) {
- register kd_event *ev;
+ kd_event *ev;
ev = kdq_get(&kbd_queue);
*(kd_event *)(&ior->io_data[count]) = *ev;
@@ -281,8 +273,7 @@ boolean_t kbd_read_done(ior)
*/
void
-kd_enqsc(sc)
- Scancode sc;
+kd_enqsc(Scancode sc)
{
kd_event ev;
@@ -299,16 +290,15 @@ kd_enqsc(sc)
*/
void
-kbd_enqueue(ev)
- kd_event *ev;
+kbd_enqueue(kd_event *ev)
{
if (kdq_full(&kbd_queue))
- printf("kbd: queue full\n");
+ printf_once("kbd: queue full\n");
else
kdq_put(&kbd_queue, ev);
{
- register io_req_t ior;
+ io_req_t ior;
while ((ior = (io_req_t)dequeue_head(&kbd_read_queue)) != 0)
iodone(ior);
}
@@ -319,9 +309,9 @@ int X_kdb_enter_len = 0, X_kdb_exit_len = 0;
void
kdb_in_out(p)
-u_int *p;
+const u_int *p;
{
-register int t = p[0];
+ int t = p[0];
switch (t & K_X_TYPE) {
case K_X_IN|K_X_BYTE:
@@ -351,9 +341,9 @@ register int t = p[0];
}
void
-X_kdb_enter()
+X_kdb_enter(void)
{
-register u_int *u_ip, *endp;
+ u_int *u_ip, *endp;
for (u_ip = X_kdb_enter_str, endp = &X_kdb_enter_str[X_kdb_enter_len];
u_ip < endp;
@@ -362,9 +352,9 @@ register u_int *u_ip, *endp;
}
void
-X_kdb_exit()
+X_kdb_exit(void)
{
-register u_int *u_ip, *endp;
+ u_int *u_ip, *endp;
for (u_ip = X_kdb_exit_str, endp = &X_kdb_exit_str[X_kdb_exit_len];
u_ip < endp;
@@ -373,9 +363,9 @@ register u_int *u_ip, *endp;
}
io_return_t
-X_kdb_enter_init(data, count)
- u_int *data;
- u_int count;
+X_kdb_enter_init(
+ u_int *data,
+ u_int count)
{
if (count * sizeof X_kdb_enter_str[0] > sizeof X_kdb_enter_str)
return D_INVALID_OPERATION;
@@ -386,9 +376,9 @@ X_kdb_enter_init(data, count)
}
io_return_t
-X_kdb_exit_init(data, count)
- u_int *data;
- u_int count;
+X_kdb_exit_init(
+ u_int *data,
+ u_int count)
{
if (count * sizeof X_kdb_exit_str[0] > sizeof X_kdb_exit_str)
return D_INVALID_OPERATION;
diff --git a/i386/i386at/kd_event.h b/i386/i386at/kd_event.h
index 677af99b..8b2d6642 100644
--- a/i386/i386at/kd_event.h
+++ b/i386/i386at/kd_event.h
@@ -26,8 +26,37 @@
#ifndef _KD_EVENT_H_
#define _KD_EVENT_H_
+#include <sys/types.h>
+#include <device/io_req.h>
+#include <i386at/kd.h>
+
extern void X_kdb_enter (void);
extern void X_kdb_exit (void);
+extern int kbdopen(dev_t dev, int flags, io_req_t ior);
+extern void kbdclose(dev_t dev, int flags);
+extern int kbdread(dev_t dev, io_req_t ior);
+
+extern io_return_t kbdgetstat(
+ dev_t dev,
+ int flavor,
+ int *data,
+ unsigned int *count);
+
+extern io_return_t kbdsetstat(
+ dev_t dev,
+ int flavor,
+ int *data,
+ unsigned int count);
+
+extern void kd_enqsc(Scancode sc);
+
+void kbd_enqueue(kd_event *ev);
+
+io_return_t X_kdb_enter_init(u_int *data, u_int count);
+io_return_t X_kdb_exit_init(u_int *data, u_int count);
+
+boolean_t kbd_read_done(io_req_t ior);
+
#endif /* _KD_EVENT_H_ */
diff --git a/i386/i386at/kd_mouse.c b/i386/i386at/kd_mouse.c
index 6e7b68a8..ece13efe 100644
--- a/i386/i386at/kd_mouse.c
+++ b/i386/i386at/kd_mouse.c
@@ -101,8 +101,6 @@ u_char lastbuttons; /* previous state of mouse buttons */
#define MOUSE_DOWN 0
#define MOUSE_ALL_UP 0x7
-void mouseintr();
-void mouse_enqueue();
int mouse_baud = BCNT1200;
boolean_t mouse_char_cmd = FALSE; /* mouse response is to cmd */
@@ -114,7 +112,7 @@ int mouse_char_index; /* mouse response */
* init_mouse_hw - initialize the serial port.
*/
void
-init_mouse_hw(unit, mode)
+init_mouse_hw(dev_t unit, int mode)
{
unsigned short base_addr = cominfo[unit]->address;
@@ -149,9 +147,10 @@ int track_man[10];
/*ARGSUSED*/
int
-mouseopen(dev, flags)
+mouseopen(dev, flags, ior)
dev_t dev;
int flags;
+ io_req_t ior;
{
if (mouse_in_use)
return (D_ALREADY_OPEN);
@@ -164,6 +163,7 @@ mouseopen(dev, flags)
mousebufsize = 3;
serial_mouse_open(dev);
init_mouse_hw(dev&7, LC7);
+ break;
case MICROSOFT_MOUSE:
mousebufsize = 3;
serial_mouse_open(dev);
@@ -198,8 +198,7 @@ mouseopen(dev, flags)
}
void
-serial_mouse_open(dev)
- dev_t dev;
+serial_mouse_open(dev_t dev)
{
int unit = minor(dev) & 0x7;
int mouse_pic = cominfo[unit]->sysdep1;
@@ -219,12 +218,11 @@ serial_mouse_open(dev)
int mouse_packets = 0;
void
-kd_mouse_open(dev, mouse_pic)
- dev_t dev;
- int mouse_pic;
+kd_mouse_open(
+ dev_t dev,
+ int mouse_pic)
{
spl_t s = splhi(); /* disable interrupts */
- extern void kdintr();
oldvect = ivect[mouse_pic];
ivect[mouse_pic] = kdintr;
@@ -239,9 +237,9 @@ kd_mouse_open(dev, mouse_pic)
* and restore the serial port interrupt vector.
*/
void
-mouseclose(dev, flags)
- dev_t dev;
- int flags;
+mouseclose(
+ dev_t dev,
+ int flags)
{
switch (mouse_type) {
case MICROSOFT_MOUSE:
@@ -266,9 +264,9 @@ mouseclose(dev, flags)
/*ARGSUSED*/
void
-serial_mouse_close(dev, flags)
- dev_t dev;
- int flags;
+serial_mouse_close(
+ dev_t dev,
+ int flags)
{
spl_t o_pri = splhi(); /* mutex with open() */
int unit = minor(dev) & 0x7;
@@ -285,9 +283,9 @@ serial_mouse_close(dev, flags)
}
void
-kd_mouse_close(dev, mouse_pic)
- dev_t dev;
- int mouse_pic;
+kd_mouse_close(
+ dev_t dev,
+ int mouse_pic)
{
spl_t s = splhi();
@@ -297,11 +295,11 @@ kd_mouse_close(dev, mouse_pic)
splx(s);
}
-io_return_t mousegetstat(dev, flavor, data, count)
- dev_t dev;
- int flavor;
- int * data; /* pointer to OUT array */
- unsigned int *count; /* OUT */
+io_return_t mousegetstat(
+ dev_t dev,
+ int flavor,
+ int * data, /* pointer to OUT array */
+ unsigned int *count) /* OUT */
{
switch (flavor) {
case DEV_GET_SIZE:
@@ -319,15 +317,13 @@ io_return_t mousegetstat(dev, flavor, data, count)
/*
* mouseread - dequeue and return any queued events.
*/
-boolean_t mouse_read_done(); /* forward */
-
int
-mouseread(dev, ior)
- dev_t dev;
- register io_req_t ior;
+mouseread(
+ dev_t dev,
+ io_req_t ior)
{
- register int err, count;
- register spl_t s;
+ int err, count;
+ spl_t s;
/* Check if IO_COUNT is a multiple of the record size. */
if (ior->io_count % sizeof(kd_event) != 0)
@@ -350,7 +346,7 @@ mouseread(dev, ior)
}
count = 0;
while (!kdq_empty(&mouse_queue) && count < ior->io_count) {
- register kd_event *ev;
+ kd_event *ev;
ev = kdq_get(&mouse_queue);
*(kd_event *)(&ior->io_data[count]) = *ev;
@@ -361,11 +357,10 @@ mouseread(dev, ior)
return (D_SUCCESS);
}
-boolean_t mouse_read_done(ior)
- register io_req_t ior;
+boolean_t mouse_read_done(io_req_t ior)
{
- register int count;
- register spl_t s;
+ int count;
+ spl_t s;
s = SPLKD();
if (kdq_empty(&mouse_queue)) {
@@ -377,7 +372,7 @@ boolean_t mouse_read_done(ior)
count = 0;
while (!kdq_empty(&mouse_queue) && count < ior->io_count) {
- register kd_event *ev;
+ kd_event *ev;
ev = kdq_get(&mouse_queue);
*(kd_event *)(&ior->io_data[count]) = *ev;
@@ -397,7 +392,7 @@ boolean_t mouse_read_done(ior)
* mouseintr - Get a byte and pass it up for handling. Called at SPLKD.
*/
void
-mouseintr(unit)
+mouseintr(int unit)
{
unsigned short base_addr = cominfo[unit]->address;
unsigned char id, ls;
@@ -445,8 +440,7 @@ int middlegitech = 0; /* what should the middle button be */
static u_char mousebuf[MOUSEBUFSIZE]; /* 5-byte packet from mouse */
void
-mouse_handle_byte(ch)
- u_char ch;
+mouse_handle_byte(u_char ch)
{
if (show_mouse_byte) {
printf("%x(%c) ", ch, ch);
@@ -527,8 +521,7 @@ mouse_handle_byte(ch)
}
void
-mouse_packet_mouse_system_mouse(mousebuf)
-u_char mousebuf[MOUSEBUFSIZE];
+mouse_packet_mouse_system_mouse(u_char mousebuf[MOUSEBUFSIZE])
{
u_char buttons, buttonchanges;
struct mouse_motion moved;
@@ -563,8 +556,7 @@ u_char mousebuf[MOUSEBUFSIZE];
*
*/
void
-mouse_packet_microsoft_mouse(mousebuf)
-u_char mousebuf[MOUSEBUFSIZE];
+mouse_packet_microsoft_mouse(u_char mousebuf[MOUSEBUFSIZE])
{
u_char buttons, buttonchanges;
struct mouse_motion moved;
@@ -638,6 +630,8 @@ int kd_mouse_read(void)
while (mousebufindex <= mouse_char_index) {
mouse_char_wanted = TRUE;
assert_wait((event_t) &mousebuf, FALSE);
+ /* We are at tty SPL level, interrupts can not happen between
+ * assert_wait and thread_block. */
thread_block((void (*)()) 0);
}
@@ -656,8 +650,7 @@ void kd_mouse_read_reset(void)
}
void
-ibm_ps2_mouse_open(dev)
- dev_t dev;
+ibm_ps2_mouse_open(dev_t dev)
{
spl_t s = spltty();
@@ -700,8 +693,7 @@ ibm_ps2_mouse_open(dev)
}
void
-ibm_ps2_mouse_close(dev)
- dev_t dev;
+ibm_ps2_mouse_close(dev_t dev)
{
spl_t s = spltty();
@@ -732,8 +724,7 @@ ibm_ps2_mouse_close(dev)
*
*/
void
-mouse_packet_ibm_ps2_mouse(mousebuf)
-u_char mousebuf[MOUSEBUFSIZE];
+mouse_packet_ibm_ps2_mouse(u_char mousebuf[MOUSEBUFSIZE])
{
u_char buttons, buttonchanges;
struct mouse_motion moved;
@@ -765,8 +756,7 @@ u_char mousebuf[MOUSEBUFSIZE];
* Enqueue a mouse-motion event. Called at SPLKD.
*/
void
-mouse_moved(where)
- struct mouse_motion where;
+mouse_moved(struct mouse_motion where)
{
kd_event ev;
@@ -776,14 +766,13 @@ mouse_moved(where)
mouse_enqueue(&ev);
}
-
/*
* Enqueue an event for mouse button press or release. Called at SPLKD.
*/
void
-mouse_button(which, direction)
- kev_type which;
- u_char direction;
+mouse_button(
+ kev_type which,
+ u_char direction)
{
kd_event ev;
@@ -793,23 +782,21 @@ mouse_button(which, direction)
mouse_enqueue(&ev);
}
-
/*
* mouse_enqueue - enqueue an event and wake up selecting processes, if
* any. Called at SPLKD.
*/
void
-mouse_enqueue(ev)
- kd_event *ev;
+mouse_enqueue(kd_event *ev)
{
if (kdq_full(&mouse_queue))
- printf("mouse: queue full\n");
+ printf_once("mouse: queue full\n");
else
kdq_put(&mouse_queue, ev);
{
- register io_req_t ior;
+ io_req_t ior;
while ((ior = (io_req_t)dequeue_head(&mouse_read_queue)) != 0)
iodone(ior);
}
diff --git a/i386/i386at/kd_mouse.h b/i386/i386at/kd_mouse.h
index baa51c8a..a8a72a3b 100644
--- a/i386/i386at/kd_mouse.h
+++ b/i386/i386at/kd_mouse.h
@@ -54,4 +54,17 @@ extern void mouse_packet_mouse_system_mouse (u_char *mousebuf);
extern void mouse_packet_ibm_ps2_mouse (u_char *mousebuf);
+extern int mouseopen(dev_t dev, int flags, io_req_t ior);
+extern void mouseclose(dev_t dev, int flags);
+extern int mouseread(dev_t dev, io_req_t ior);
+
+extern io_return_t mousegetstat(
+ dev_t dev,
+ int flavor,
+ int *data,
+ unsigned int *count);
+
+void mouseintr(int unit);
+boolean_t mouse_read_done(io_req_t ior);
+
#endif /* _KD_MOUSE_H_ */
diff --git a/i386/i386at/kd_queue.c b/i386/i386at/kd_queue.c
index 2b83044a..57d6fbf7 100644
--- a/i386/i386at/kd_queue.c
+++ b/i386/i386at/kd_queue.c
@@ -72,14 +72,14 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
boolean_t
kdq_empty(q)
- kd_event_queue *q;
+ const kd_event_queue *q;
{
return(q->firstfree == q->firstout);
}
boolean_t
kdq_full(q)
- kd_event_queue *q;
+ const kd_event_queue *q;
{
return(q_next(q->firstfree) == q->firstout);
}
@@ -98,8 +98,7 @@ kdq_put(q, ev)
}
kd_event *
-kdq_get(q)
- kd_event_queue *q;
+kdq_get(kd_event_queue *q)
{
kd_event *result = q->events + q->firstout;
@@ -108,8 +107,7 @@ kdq_get(q)
}
void
-kdq_reset(q)
- kd_event_queue *q;
+kdq_reset(kd_event_queue *q)
{
q->firstout = q->firstfree = 0;
}
diff --git a/i386/i386at/kd_queue.h b/i386/i386at/kd_queue.h
index c976acfa..702efe8a 100644
--- a/i386/i386at/kd_queue.h
+++ b/i386/i386at/kd_queue.h
@@ -64,6 +64,9 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
* /dev/mouse.
*/
+#ifndef _KD_QUEUE_H_
+#define _KD_QUEUE_H_
+
#include <mach/std_types.h>
#include <i386at/kd.h>
@@ -76,6 +79,8 @@ typedef struct {
extern void kdq_put(kd_event_queue *, kd_event *);
extern void kdq_reset(kd_event_queue *);
-extern boolean_t kdq_empty(kd_event_queue *);
-extern boolean_t kdq_full(kd_event_queue *);
+extern boolean_t kdq_empty(const kd_event_queue *);
+extern boolean_t kdq_full(const kd_event_queue *);
extern kd_event *kdq_get(kd_event_queue *);
+
+#endif /* _KD_QUEUE_H_ */
diff --git a/i386/i386at/kdsoft.h b/i386/i386at/kdsoft.h
index 96e2df8c..1dfd2b2c 100644
--- a/i386/i386at/kdsoft.h
+++ b/i386/i386at/kdsoft.h
@@ -57,6 +57,9 @@ NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUR OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#ifndef _KDSOFT_H_
+#define _KDSOFT_H_
+
/*
* Globals used for both character-based controllers and bitmap-based
* controllers.
@@ -203,3 +206,5 @@ extern short xstart, ystart;
extern short char_byte_width; /* char_width/8 */
extern short fb_byte_width; /* fb_width/8 */
extern short font_byte_width; /* num bytes in 1 scan line of font */
+
+#endif /* _KDSOFT_H_ */
diff --git a/i386/i386at/lpr.c b/i386/i386at/lpr.c
index c92795ef..73c4261b 100644
--- a/i386/i386at/lpr.c
+++ b/i386/i386at/lpr.c
@@ -44,20 +44,12 @@
#include <i386/pio.h>
#include <chips/busses.h>
#include <i386at/autoconf.h>
-#include <i386at/lprreg.h>
+#include <i386at/lpr.h>
-
/*
* Driver information for auto-configuration stuff.
*/
-int lprprobe();
-void lprstop();
-void lprintr(), lprstart();
-void lprattach(struct bus_device *);
-int lprgetstat(), lprsetstat();
-void lprpr_addr();
-
struct bus_device *lprinfo[NLPR]; /* ??? */
static vm_offset_t lpr_std[NLPR] = { 0 };
@@ -71,13 +63,14 @@ int lpr_alive[NLPR];
int
lprprobe(port, dev)
-struct bus_device *dev;
+vm_offset_t port;
+struct bus_ctlr *dev;
{
u_short addr = (u_short) dev->address;
int unit = dev->unit;
int ret;
- if ((unit < 0) || (unit > NLPR)) {
+ if ((unit < 0) || (unit >= NLPR)) {
printf("com %d out of range\n", unit);
return(0);
}
@@ -112,22 +105,24 @@ void lprattach(struct bus_device *dev)
}
int
-lpropen(dev, flag, ior)
-int dev;
-int flag;
-io_req_t ior;
+lpropen(dev_t dev, int flag, io_req_t ior)
{
-int unit = minor(dev);
-struct bus_device *isai;
-struct tty *tp;
-u_short addr;
-
- if (unit >= NLPR || (isai = lprinfo[unit]) == 0 || isai->alive == 0)
- return (D_NO_SUCH_DEVICE);
+ int unit = minor(dev);
+ struct bus_device *isai;
+ struct tty *tp;
+ u_short addr;
+
+ if (unit >= NLPR)
+ return D_NO_SUCH_DEVICE;
+
+ isai = lprinfo[unit];
+ if (isai == NULL || !isai->alive)
+ return D_NO_SUCH_DEVICE;
+
tp = &lpr_tty[unit];
addr = (u_short) isai->address;
tp->t_dev = dev;
- tp->t_addr = *(caddr_t *)&addr;
+ tp->t_addr = addr;
tp->t_oproc = lprstart;
tp->t_state |= TS_WOPEN;
tp->t_stop = lprstop;
@@ -142,7 +137,7 @@ u_short addr;
void
lprclose(dev, flag)
-int dev;
+dev_t dev;
int flag;
{
int unit = minor(dev);
@@ -158,7 +153,7 @@ u_short addr = (u_short) lprinfo[unit]->address;
int
lprread(dev, ior)
-int dev;
+dev_t dev;
io_req_t ior;
{
return char_read(&lpr_tty[minor(dev)], ior);
@@ -166,7 +161,7 @@ io_req_t ior;
int
lprwrite(dev, ior)
-int dev;
+dev_t dev;
io_req_t ior;
{
return char_write(&lpr_tty[minor(dev)], ior);
@@ -199,11 +194,11 @@ natural_t *count; /* out */
}
io_return_t
-lprsetstat(dev, flavor, data, count)
-dev_t dev;
-int flavor;
-int * data;
-natural_t count;
+lprsetstat(
+ dev_t dev,
+ int flavor,
+ int * data,
+ natural_t count)
{
io_return_t result = D_SUCCESS;
int unit = minor(dev);
@@ -218,10 +213,9 @@ natural_t count;
return (D_SUCCESS);
}
-void lprintr(unit)
-int unit;
+void lprintr(int unit)
{
- register struct tty *tp = &lpr_tty[unit];
+ struct tty *tp = &lpr_tty[unit];
if ((tp->t_state & TS_ISOPEN) == 0)
return;
@@ -233,8 +227,7 @@ int unit;
lprstart(tp);
}
-void lprstart(tp)
-struct tty *tp;
+void lprstart(struct tty *tp)
{
spl_t s = spltty();
u_short addr = (natural_t) tp->t_addr;
@@ -274,22 +267,22 @@ struct tty *tp;
}
void
-lprstop(tp, flags)
-register struct tty *tp;
-int flags;
+lprstop(
+ struct tty *tp,
+ int flags)
{
if ((tp->t_state & TS_BUSY) && (tp->t_state & TS_TTSTOP) == 0)
tp->t_state |= TS_FLUSH;
}
int
-lprpr(unit)
+lprpr(int unit)
{
lprpr_addr(lprinfo[unit]->address);
return 0;
}
void
-lprpr_addr(addr)
+lprpr_addr(unsigned short addr)
{
printf("DATA(%x) %x, STATUS(%x) %x, INTR_ENAB(%x) %x\n",
DATA(addr), inb(DATA(addr)),
diff --git a/i386/i386at/lprreg.h b/i386/i386at/lpr.h
index c6fbed43..269fd643 100644
--- a/i386/i386at/lprreg.h
+++ b/i386/i386at/lpr.h
@@ -27,7 +27,40 @@
* Parallel port printer driver v1.0
* All rights reserved.
*/
+
+#ifndef _LPRREG_H_
+#define _LPRREG_H_
#define DATA(addr) (addr + 0)
#define STATUS(addr) (addr + 1)
#define INTR_ENAB(addr) (addr + 2)
+
+extern void lprintr(int unit);
+int lprprobe(vm_offset_t port, struct bus_ctlr *dev);
+void lprstop(struct tty *tp, int flags);
+void lprstart(struct tty *tp);
+void lprattach(struct bus_device *dev);
+
+extern io_return_t
+lprgetstat(
+ dev_t dev,
+ int flavor,
+ int *data,
+ natural_t *count);
+
+extern io_return_t
+lprsetstat(
+ dev_t dev,
+ int flavor,
+ int *data,
+ natural_t count);
+
+void lprpr_addr(unsigned short addr);
+
+extern int lpropen(dev_t dev, int flag, io_req_t ior);
+extern void lprclose(dev_t dev, int flag);
+extern int lprread(dev_t dev, io_req_t ior);
+extern int lprwrite(dev_t dev, io_req_t ior);
+extern int lprportdeath(dev_t dev, mach_port_t port);
+
+#endif /* _LPRREG_H_ */
diff --git a/i386/i386at/mem.c b/i386/i386at/mem.c
index 5e51676b..f239afac 100644
--- a/i386/i386at/mem.c
+++ b/i386/i386at/mem.c
@@ -32,7 +32,7 @@
/*ARGSUSED*/
int
memmmap(dev, off, prot)
-int dev;
+dev_t dev;
vm_offset_t off;
vm_prot_t prot;
{
diff --git a/i386/i386at/mem.h b/i386/i386at/mem.h
new file mode 100644
index 00000000..0bc85ea4
--- /dev/null
+++ b/i386/i386at/mem.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _MEM_H_
+#define _MEM_H_
+
+extern int memmmap(dev_t dev, vm_offset_t off, vm_prot_t prot);
+
+#endif /* _MEM_H_ */
diff --git a/i386/i386at/model_dep.c b/i386/i386at/model_dep.c
index d97f0850..62763ae1 100644
--- a/i386/i386at/model_dep.c
+++ b/i386/i386at/model_dep.c
@@ -47,7 +47,9 @@
#include <kern/cpu_number.h>
#include <kern/debug.h>
#include <kern/mach_clock.h>
+#include <kern/macros.h>
#include <kern/printf.h>
+#include <kern/startup.h>
#include <sys/time.h>
#include <sys/types.h>
#include <vm/vm_page.h>
@@ -63,10 +65,13 @@
#include <i386/locore.h>
#include <i386/model_dep.h>
#include <i386at/autoconf.h>
+#include <i386at/biosmem.h>
#include <i386at/idt.h>
#include <i386at/int_init.h>
#include <i386at/kd.h>
#include <i386at/rtc.h>
+#include <i386at/model_dep.h>
+#include <i386at/acpihalt.h>
#ifdef MACH_XEN
#include <xen/console.h>
#include <xen/store.h>
@@ -74,14 +79,31 @@
#include <xen/xen.h>
#endif /* MACH_XEN */
+#if ENABLE_IMMEDIATE_CONSOLE
+#include "immc.h"
+#endif /* ENABLE_IMMEDIATE_CONSOLE */
+
/* Location of the kernel's symbol table.
Both of these are 0 if none is available. */
#if MACH_KDB
+#include <ddb/db_sym.h>
+#include <i386/db_interface.h>
+
+/* a.out symbol table */
static vm_offset_t kern_sym_start, kern_sym_end;
-#else
+
+/* ELF section header */
+static unsigned elf_shdr_num;
+static vm_size_t elf_shdr_size;
+static vm_offset_t elf_shdr_addr;
+static unsigned elf_shdr_shndx;
+
+#else /* MACH_KDB */
#define kern_sym_start 0
#define kern_sym_end 0
-#endif
+#endif /* MACH_KDB */
+
+#define RESERVED_BIOS 0x10000
/* These indicate the total extent of physical memory addresses we're using.
They are page-aligned. */
@@ -107,40 +129,19 @@ struct multiboot_info boot_info;
/* Command line supplied to kernel. */
char *kernel_cmdline = "";
-/* This is used for memory initialization:
- it gets bumped up through physical memory
- that exists and is not occupied by boot gunk.
- It is not necessarily page-aligned. */
-static vm_offset_t avail_next
-#ifndef MACH_HYP
- = 0x1000 /* XX end of BIOS data area */
-#endif /* MACH_HYP */
- ;
-
-/* Possibly overestimated amount of available memory
- still remaining to be handed to the VM system. */
-static vm_size_t avail_remaining;
-
extern char version[];
-extern void setup_main();
-
-void halt_all_cpus (boolean_t reboot) __attribute__ ((noreturn));
-void halt_cpu (void) __attribute__ ((noreturn));
+/* If set, reboot the system on ctrl-alt-delete. */
+boolean_t rebootflag = FALSE; /* exported to kdintr */
-void inittodr(); /* forward */
-
-int rebootflag = 0; /* exported to kdintr */
-
-/* XX interrupt stack pointer and highwater mark, for locore.S. */
-vm_offset_t int_stack_top, int_stack_high;
+/* Interrupt stack. */
+static char int_stack[KERNEL_STACK_SIZE] __aligned(KERNEL_STACK_SIZE);
+vm_offset_t int_stack_top, int_stack_base;
#ifdef LINUX_DEV
extern void linux_init(void);
#endif
-boolean_t init_alloc_aligned(vm_size_t size, vm_offset_t *addrp);
-
/*
* Find devices. The system is alive.
*/
@@ -184,10 +185,14 @@ void machine_init(void)
*(unsigned short *)phystokv(0x472) = 0x1234;
#endif /* MACH_HYP */
+#if VM_MIN_KERNEL_ADDRESS == 0
/*
* Unmap page 0 to trap NULL references.
+ *
+ * Note that this breaks accessing some BIOS areas stored there.
*/
pmap_unmap_page_zero();
+#endif
}
/* Conserve power on processor CPU. */
@@ -201,7 +206,7 @@ void machine_idle (int cpu)
#endif /* MACH_HYP */
}
-void machine_relax ()
+void machine_relax (void)
{
asm volatile ("rep; nop" : : : "memory");
}
@@ -223,8 +228,7 @@ void halt_cpu(void)
/*
* Halt the system or reboot.
*/
-void halt_all_cpus(reboot)
- boolean_t reboot;
+void halt_all_cpus(boolean_t reboot)
{
if (reboot) {
#ifdef MACH_HYP
@@ -233,10 +237,11 @@ void halt_all_cpus(reboot)
kdreboot();
}
else {
- rebootflag = 1;
+ rebootflag = TRUE;
#ifdef MACH_HYP
hyp_halt();
#endif /* MACH_HYP */
+ grub_acpi_halt();
printf("In tight loop: hit ctl-alt-del to reboot\n");
(void) spl0();
}
@@ -249,94 +254,14 @@ void exit(int rc)
halt_all_cpus(0);
}
-void db_reset_cpu(void)
+void db_halt_cpu(void)
{
- halt_all_cpus(1);
+ halt_all_cpus(0);
}
-
-/*
- * Compute physical memory size and other parameters.
- */
-void
-mem_size_init(void)
+void db_reset_cpu(void)
{
- vm_offset_t max_phys_size;
-
- /* Physical memory on all PCs starts at physical address 0.
- XX make it a constant. */
- phys_first_addr = 0;
-
-#ifdef MACH_HYP
- if (boot_info.nr_pages >= 0x100000) {
- printf("Truncating memory size to 4GiB\n");
- phys_last_addr = 0xffffffffU;
- } else
- phys_last_addr = boot_info.nr_pages * 0x1000;
-#else /* MACH_HYP */
- vm_size_t phys_last_kb;
-
- if (boot_info.flags & MULTIBOOT_MEM_MAP) {
- struct multiboot_mmap *map, *map_end;
-
- map = (void*) phystokv(boot_info.mmap_addr);
- map_end = (void*) map + boot_info.mmap_count;
-
- while (map + 1 <= map_end) {
- if (map->Type == MB_ARD_MEMORY) {
- unsigned long long start = map->BaseAddr, end = map->BaseAddr + map->Length;;
-
- if (start >= 0x100000000ULL) {
- printf("Ignoring %luMiB RAM region above 4GiB\n", (unsigned long) (map->Length >> 20));
- } else {
- if (end >= 0x100000000ULL) {
- printf("Truncating memory region to 4GiB\n");
- end = 0x0ffffffffU;
- }
- if (end > phys_last_addr)
- phys_last_addr = end;
-
- printf("AT386 boot: physical memory map from 0x%lx to 0x%lx\n",
- (unsigned long) start,
- (unsigned long) end);
- }
- }
- map = (void*) map + map->size + sizeof(map->size);
- }
- } else {
- phys_last_kb = 0x400 + boot_info.mem_upper;
- /* Avoid 4GiB overflow. */
- if (phys_last_kb < 0x400 || phys_last_kb >= 0x400000) {
- printf("Truncating memory size to 4GiB\n");
- phys_last_addr = 0xffffffffU;
- } else
- phys_last_addr = phys_last_kb * 0x400;
- }
-#endif /* MACH_HYP */
-
- printf("AT386 boot: physical memory from 0x%lx to 0x%lx\n",
- phys_first_addr, phys_last_addr);
-
- /* Reserve room for virtual mappings.
- * Yes, this loses memory. Blame i386. */
- max_phys_size = VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS - VM_KERNEL_MAP_SIZE;
- if (phys_last_addr - phys_first_addr > max_phys_size) {
- phys_last_addr = phys_first_addr + max_phys_size;
- printf("Truncating memory to %luMiB\n", (phys_last_addr - phys_first_addr) / (1024 * 1024));
- /* TODO Xen: be nice, free lost memory */
- }
-
- phys_first_addr = round_page(phys_first_addr);
- phys_last_addr = trunc_page(phys_last_addr);
-
-#ifdef MACH_HYP
- /* Memory is just contiguous */
- avail_remaining = phys_last_addr;
-#else /* MACH_HYP */
- avail_remaining
- = phys_last_addr - (0x100000 - (boot_info.mem_lower * 0x400)
- - 0x1000);
-#endif /* MACH_HYP */
+ halt_all_cpus(1);
}
/*
@@ -361,9 +286,13 @@ i386at_init(void)
#endif /* MACH_HYP */
/*
- * Find memory size parameters.
+ * Read memory map and load it into the physical page allocator.
*/
- mem_size_init();
+#ifdef MACH_HYP
+ biosmem_xen_bootstrap();
+#else /* MACH_HYP */
+ biosmem_bootstrap((struct multiboot_raw_info *) &boot_info);
+#endif /* MACH_HYP */
#ifdef MACH_XEN
kernel_cmdline = (char*) boot_info.cmd_line;
@@ -374,7 +303,7 @@ i386at_init(void)
int len = strlen ((char*)phystokv(boot_info.cmdline)) + 1;
assert(init_alloc_aligned(round_page(len), &addr));
kernel_cmdline = (char*) phystokv(addr);
- memcpy(kernel_cmdline, (char*)phystokv(boot_info.cmdline), len);
+ memcpy(kernel_cmdline, (void *)phystokv(boot_info.cmdline), len);
boot_info.cmdline = addr;
}
@@ -411,6 +340,13 @@ i386at_init(void)
pmap_bootstrap();
/*
+ * Load physical segments into the VM system.
+ * The early allocation functions become unusable after
+ * this point.
+ */
+ biosmem_setup();
+
+ /*
* We'll have to temporarily install a direct mapping
* between physical memory and low linear memory,
* until we start using our new kernel segment descriptors.
@@ -464,11 +400,6 @@ i386at_init(void)
pmap_clear_bootstrap_pagetable((void *)boot_info.pt_base);
#endif /* MACH_PV_PAGETABLES */
- /* Interrupt stacks are allocated in physical memory,
- while kernel stacks are allocated in kernel virtual memory,
- so phys_last_addr serves as a convenient dividing point. */
- int_stack_high = phystokv(phys_last_addr);
-
/*
* Initialize and activate the real i386 protected-mode structures.
*/
@@ -514,10 +445,8 @@ i386at_init(void)
hyp_p2m_init();
#endif /* MACH_XEN */
- /* XXX We'll just use the initialization stack we're already running on
- as the interrupt stack for now. Later this will have to change,
- because the init stack will get freed after bootup. */
- asm("movl %%esp,%0" : "=m" (int_stack_top));
+ int_stack_base = (vm_offset_t)&int_stack;
+ int_stack_top = int_stack_base + KERNEL_STACK_SIZE - 4;
}
/*
@@ -526,6 +455,10 @@ i386at_init(void)
*/
void c_boot_entry(vm_offset_t bi)
{
+#if ENABLE_IMMEDIATE_CONSOLE
+ romputc = immc_romputc;
+#endif /* ENABLE_IMMEDIATE_CONSOLE */
+
/* Stash the boot_image_info pointer. */
boot_info = *(typeof(boot_info)*)phystokv(bi);
int cpu_type;
@@ -565,6 +498,17 @@ void c_boot_entry(vm_offset_t bi)
kern_sym_start, kern_sym_end,
symtab_size, strtab_size);
}
+
+ if ((boot_info.flags & MULTIBOOT_ELF_SHDR)
+ && boot_info.syms.e.num)
+ {
+ elf_shdr_num = boot_info.syms.e.num;
+ elf_shdr_size = boot_info.syms.e.size;
+ elf_shdr_addr = (vm_offset_t)phystokv(boot_info.syms.e.addr);
+ elf_shdr_shndx = boot_info.syms.e.shndx;
+
+ printf("ELF section header table at %08lx\n", elf_shdr_addr);
+ }
#endif /* MACH_KDB */
#endif /* MACH_XEN */
@@ -581,7 +525,14 @@ void c_boot_entry(vm_offset_t bi)
*/
if (kern_sym_start)
{
- aout_db_sym_init(kern_sym_start, kern_sym_end, "mach", 0);
+ aout_db_sym_init((char *)kern_sym_start, (char *)kern_sym_end, "mach", (char *)0);
+ }
+
+ if (elf_shdr_num)
+ {
+ elf_db_sym_init(elf_shdr_num,elf_shdr_size,
+ elf_shdr_addr, elf_shdr_shndx,
+ "mach", NULL);
}
#endif /* MACH_KDB */
@@ -620,15 +571,13 @@ void c_boot_entry(vm_offset_t bi)
#include <mach/time_value.h>
int
-timemmap(dev,off,prot)
+timemmap(dev, off, prot)
+ dev_t dev;
+ vm_offset_t off;
vm_prot_t prot;
{
extern time_value_t *mtime;
-#ifdef lint
- dev++; off++;
-#endif /* lint */
-
if (prot & VM_PROT_WRITE) return (-1);
return (i386_btop(pmap_extract(pmap_kernel(), (vm_offset_t) mtime)));
@@ -665,188 +614,20 @@ resettodr(void)
unsigned int pmap_free_pages(void)
{
- return atop(avail_remaining);
+ return vm_page_atop(phys_last_addr); /* XXX */
}
-/* Always returns page-aligned regions. */
boolean_t
init_alloc_aligned(vm_size_t size, vm_offset_t *addrp)
{
- vm_offset_t addr;
-
-#ifdef MACH_HYP
- /* There is none */
- if (!avail_next)
- avail_next = _kvtophys(boot_info.pt_base) + (boot_info.nr_pt_frames + 3) * 0x1000;
-#else /* MACH_HYP */
- extern char start[], end[];
- int i;
- static int wrapped = 0;
-
- /* Memory regions to skip. */
- vm_offset_t cmdline_start_pa = boot_info.flags & MULTIBOOT_CMDLINE
- ? boot_info.cmdline : 0;
- vm_offset_t cmdline_end_pa = cmdline_start_pa
- ? cmdline_start_pa+strlen((char*)phystokv(cmdline_start_pa))+1
- : 0;
- vm_offset_t mods_start_pa = boot_info.flags & MULTIBOOT_MODS
- ? boot_info.mods_addr : 0;
- vm_offset_t mods_end_pa = mods_start_pa
- ? mods_start_pa
- + boot_info.mods_count * sizeof(struct multiboot_module)
- : 0;
-
- retry:
-#endif /* MACH_HYP */
-
- /* Page-align the start address. */
- avail_next = round_page(avail_next);
+ *addrp = biosmem_bootalloc(vm_page_atop(vm_page_round(size)));
-#ifndef MACH_HYP
- /* Start with memory above 16MB, reserving the low memory for later. */
- /* Don't care on Xen */
- if (!wrapped && phys_last_addr > 16 * 1024*1024)
- {
- if (avail_next < 16 * 1024*1024)
- avail_next = 16 * 1024*1024;
- else if (avail_next == phys_last_addr)
- {
- /* We have used all the memory above 16MB, so now start on
- the low memory. This will wind up at the end of the list
- of free pages, so it should not have been allocated to any
- other use in early initialization before the Linux driver
- glue initialization needs to allocate low memory. */
- avail_next = 0x1000;
- wrapped = 1;
- }
- }
-#endif /* MACH_HYP */
-
- /* Check if we have reached the end of memory. */
- if (avail_next ==
- (
-#ifndef MACH_HYP
- wrapped ? 16 * 1024*1024 :
-#endif /* MACH_HYP */
- phys_last_addr))
+ if (*addrp == 0)
return FALSE;
- /* Tentatively assign the current location to the caller. */
- addr = avail_next;
-
- /* Bump the pointer past the newly allocated region
- and see where that puts us. */
- avail_next += size;
-
-#ifndef MACH_HYP
- /* Skip past the I/O and ROM area. */
- if (boot_info.flags & MULTIBOOT_MEM_MAP)
- {
- struct multiboot_mmap *map, *map_end, *current = NULL, *next = NULL;
- unsigned long long minimum_next = ~0ULL;
-
- map = (void*) phystokv(boot_info.mmap_addr);
- map_end = (void*) map + boot_info.mmap_count;
-
- /* Find both our current map, and the next one */
- while (map + 1 <= map_end)
- {
- if (map->Type == MB_ARD_MEMORY)
- {
- unsigned long long start = map->BaseAddr;
- unsigned long long end = start + map->Length;;
-
- if (start <= addr && avail_next <= end)
- {
- /* Ok, fits in the current map */
- current = map;
- break;
- }
- else if (avail_next <= start && start < minimum_next)
- {
- /* This map is not far from avail_next */
- next = map;
- minimum_next = start;
- }
- }
- map = (void*) map + map->size + sizeof(map->size);
- }
-
- if (!current) {
- /* Area does not fit in the current map, switch to next
- * map if any */
- if (!next || next->BaseAddr >= phys_last_addr)
- {
- /* No further reachable map, we have reached
- * the end of memory, but possibly wrap around
- * 16MiB. */
- avail_next = phys_last_addr;
- goto retry;
- }
-
- /* Start from next map */
- avail_next = next->BaseAddr;
- goto retry;
- }
- }
- else if ((avail_next > (boot_info.mem_lower * 0x400)) && (addr < 0x100000))
- {
- avail_next = 0x100000;
- goto retry;
- }
-
- /* Skip our own kernel code, data, and bss. */
- if ((phystokv(avail_next) > (vm_offset_t)start) && (phystokv(addr) < (vm_offset_t)end))
- {
- avail_next = _kvtophys(end);
- goto retry;
- }
-
- /* Skip any areas occupied by valuable boot_info data. */
- if ((avail_next > cmdline_start_pa) && (addr < cmdline_end_pa))
- {
- avail_next = cmdline_end_pa;
- goto retry;
- }
- if ((avail_next > mods_start_pa) && (addr < mods_end_pa))
- {
- avail_next = mods_end_pa;
- goto retry;
- }
- if ((phystokv(avail_next) > kern_sym_start) && (phystokv(addr) < kern_sym_end))
- {
- avail_next = _kvtophys(kern_sym_end);
- goto retry;
- }
- if (boot_info.flags & MULTIBOOT_MODS)
- {
- struct multiboot_module *m = (struct multiboot_module *)
- phystokv(boot_info.mods_addr);
- for (i = 0; i < boot_info.mods_count; i++)
- {
- if ((avail_next > m[i].mod_start)
- && (addr < m[i].mod_end))
- {
- avail_next = m[i].mod_end;
- goto retry;
- }
- /* XXX string */
- }
- }
-#endif /* MACH_HYP */
-
- avail_remaining -= size;
-
- *addrp = addr;
return TRUE;
}
-boolean_t pmap_next_page(addrp)
- vm_offset_t *addrp;
-{
- return init_alloc_aligned(PAGE_SIZE, addrp);
-}
-
/* Grab a physical page:
the standard memory allocation mechanism
during system initialization. */
@@ -854,13 +635,12 @@ vm_offset_t
pmap_grab_page(void)
{
vm_offset_t addr;
- if (!pmap_next_page(&addr))
+ if (!init_alloc_aligned(PAGE_SIZE, &addr))
panic("Not enough memory to initialize Mach");
return addr;
}
-boolean_t pmap_valid_page(x)
- vm_offset_t x;
+boolean_t pmap_valid_page(vm_offset_t x)
{
/* XXX is this OK? What does it matter for? */
return (((phys_first_addr <= x) && (x < phys_last_addr))
diff --git a/i386/i386at/model_dep.h b/i386/i386at/model_dep.h
new file mode 100644
index 00000000..47551b85
--- /dev/null
+++ b/i386/i386at/model_dep.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _MODEL_DEP_H_
+#define _MODEL_DEP_H_
+
+#include <i386/vm_param.h>
+#include <mach/vm_prot.h>
+
+/*
+ * Interrupt stack.
+ */
+extern vm_offset_t int_stack_top, int_stack_base;
+
+/* Check whether P points to the interrupt stack. */
+#define ON_INT_STACK(P) (((P) & ~(KERNEL_STACK_SIZE-1)) == int_stack_base)
+
+extern int timemmap(dev_t dev, vm_offset_t off, vm_prot_t prot);
+
+void inittodr(void);
+
+boolean_t init_alloc_aligned(vm_size_t size, vm_offset_t *addrp);
+
+#endif /* _MODEL_DEP_H_ */
diff --git a/i386/i386at/pic_isa.c b/i386/i386at/pic_isa.c
index e48fb507..0b36534e 100644
--- a/i386/i386at/pic_isa.c
+++ b/i386/i386at/pic_isa.c
@@ -28,10 +28,10 @@
#include <i386/ipl.h>
#include <i386/pic.h>
#include <i386/fpu.h>
+#include <i386/hardclock.h>
#include <i386at/kd.h>
/* These interrupts are always present */
-extern void hardclock();
void (*ivect[NINTR])() = {
/* 00 */ hardclock, /* always */
diff --git a/i386/i386at/rtc.c b/i386/i386at/rtc.c
index 67768013..01e09772 100644
--- a/i386/i386at/rtc.c
+++ b/i386/i386at/rtc.c
@@ -53,10 +53,10 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#include <i386/pio.h>
#include <i386at/rtc.h>
-static int first_rtcopen_ever = 1;
+static boolean_t first_rtcopen_ever = TRUE;
void
-rtcinit()
+rtcinit(void)
{
outb(RTC_ADDR, RTC_A);
outb(RTC_DATA, RTC_DIV2 | RTC_RATE6);
@@ -66,12 +66,12 @@ rtcinit()
int
-rtcget(regs)
-unsigned char *regs;
+rtcget(struct rtc_st *st)
{
+ unsigned char *regs = (unsigned char *)st;
if (first_rtcopen_ever) {
rtcinit();
- first_rtcopen_ever = 0;
+ first_rtcopen_ever = FALSE;
}
outb(RTC_ADDR, RTC_D);
if ((inb(RTC_DATA) & RTC_VRT) == 0) return(-1);
@@ -83,14 +83,14 @@ unsigned char *regs;
}
void
-rtcput(regs)
-unsigned char *regs;
+rtcput(struct rtc_st *st)
{
- register unsigned char x;
+ unsigned char *regs = (unsigned char *)st;
+ unsigned char x;
if (first_rtcopen_ever) {
rtcinit();
- first_rtcopen_ever = 0;
+ first_rtcopen_ever = FALSE;
}
outb(RTC_ADDR, RTC_B);
x = inb(RTC_DATA);
@@ -103,34 +103,29 @@ unsigned char *regs;
extern struct timeval time;
-extern struct timezone tz;
static int month[12] = {31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
int
-yeartoday(year)
-int year;
+yeartoday(int year)
{
return((year%4) ? 365 : 366);
}
int
-hexdectodec(n)
-char n;
+hexdectodec(char n)
{
return(((n>>4)&0x0F)*10 + (n&0x0F));
}
char
-dectohexdec(n)
-int n;
+dectohexdec(int n)
{
return((char)(((n/10)<<4)&0xF0) | ((n%10)&0x0F));
}
int
-readtodc(tp)
- u_int *tp;
+readtodc(u_int *tp)
{
struct rtc_st rtclk;
time_t n;
@@ -172,7 +167,7 @@ readtodc(tp)
}
int
-writetodc()
+writetodc(void)
{
struct rtc_st rtclk;
time_t n;
diff --git a/i386/i386at/rtc.h b/i386/i386at/rtc.h
index ced39b98..97eabe95 100644
--- a/i386/i386at/rtc.h
+++ b/i386/i386at/rtc.h
@@ -45,6 +45,9 @@ NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
+#ifndef _RTC_H_
+#define _RTC_H_
+
#define RTC_ADDR 0x70 /* I/O port address for register select */
#define RTC_DATA 0x71 /* I/O port address for data read/write */
@@ -114,7 +117,7 @@ struct rtc_st {
*/
#define load_rtc(regs) \
{\
- register int i; \
+ int i; \
\
for (i = 0; i < RTC_NREG; i++) { \
outb(RTC_ADDR, i); \
@@ -127,7 +130,7 @@ struct rtc_st {
*/
#define save_rtc(regs) \
{ \
- register int i; \
+ int i; \
for (i = 0; i < RTC_NREGP; i++) { \
outb(RTC_ADDR, i); \
outb(RTC_DATA, regs[i]);\
@@ -136,3 +139,5 @@ struct rtc_st {
extern int readtodc(u_int *tp);
extern int writetodc(void);
+
+#endif /* _RTC_H_ */
diff --git a/i386/include/mach/i386/asm.h b/i386/include/mach/i386/asm.h
index 3e3f48bf..4e3b589a 100644
--- a/i386/include/mach/i386/asm.h
+++ b/i386/include/mach/i386/asm.h
@@ -24,6 +24,8 @@
* the rights to redistribute these changes.
*/
+#ifndef _MACH_I386_ASM_H_
+#define _MACH_I386_ASM_H_
#define S_ARG0 4(%esp)
#define S_ARG1 8(%esp)
@@ -113,3 +115,5 @@
#define Entry(x) .globl EXT(x); .p2align TEXT_ALIGN; LEXT(x)
#define DATA(x) .globl EXT(x); .p2align DATA_ALIGN; LEXT(x)
+
+#endif /* _MACH_I386_ASM_H_ */
diff --git a/i386/include/mach/i386/cthreads.h b/i386/include/mach/i386/cthreads.h
index f9755b4a..d2aa16f5 100644
--- a/i386/include/mach/i386/cthreads.h
+++ b/i386/include/mach/i386/cthreads.h
@@ -53,4 +53,4 @@ typedef volatile int spin_lock_t;
#endif /* __GNUC__ */
-#endif _MACHINE_CTHREADS_H_
+#endif /* _MACHINE_CTHREADS_H_ */
diff --git a/i386/include/mach/i386/disk.h b/i386/include/mach/i386/disk.h
deleted file mode 100644
index 40ed4fa8..00000000
--- a/i386/include/mach/i386/disk.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * Mach Operating System
- * Copyright (c) 1991,1990,1989 Carnegie Mellon University
- * All Rights Reserved.
- *
- * Permission to use, copy, modify and distribute this software and its
- * documentation is hereby granted, provided that both the copyright
- * notice and this permission notice appear in all copies of the
- * software, derivative works or modified versions, and any portions
- * thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
- * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
- * Carnegie Mellon requests users of this software to return to
- *
- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
- * School of Computer Science
- * Carnegie Mellon University
- * Pittsburgh PA 15213-3890
- *
- * any improvements or extensions that they make and grant Carnegie Mellon
- * the rights to redistribute these changes.
- */
-/*
- Copyright 1988, 1989 by Intel Corporation, Santa Clara, California.
-
- All Rights Reserved
-
-Permission to use, copy, modify, and distribute this software and
-its documentation for any purpose and without fee is hereby
-granted, provided that the above copyright notice appears in all
-copies and that both the copyright notice and this permission notice
-appear in supporting documentation, and that the name of Intel
-not be used in advertising or publicity pertaining to distribution
-of the software without specific, written prior permission.
-
-INTEL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
-INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
-IN NO EVENT SHALL INTEL BE LIABLE FOR ANY SPECIAL, INDIRECT, OR
-CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-LOSS OF USE, DATA OR PROFITS, WHETHER IN ACTION OF CONTRACT,
-NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
-WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-*/
-
-/*
- * disk.h
- */
-
-#if defined(__linux__) || defined(__masix__)
-#define PART_DISK 4 /* partition number for entire disk */
-#else
-#define PART_DISK 2 /* partition number for entire disk */
-#endif
-
-
-/* driver ioctl() commands */
-
-#define V_CONFIG _IOW('v',1,union io_arg)/* Configure Drive */
-#define V_REMOUNT _IO('v',2) /* Remount Drive */
-#define V_ADDBAD _IOW('v',3,union io_arg)/* Add Bad Sector */
-#define V_GETPARMS _IOR('v',4,struct disk_parms) /* Get drive/partition parameters */
-#define V_FORMAT _IOW('v',5,union io_arg)/* Format track(s) */
-#define V_PDLOC _IOR('v',6,int) /* Ask driver where pdinfo is on disk */
-
-#define V_ABS _IOW('v',9,int) /* set a sector for an absolute addr */
-#define V_RDABS _IOW('v',10,struct absio)/* Read a sector at an absolute addr */
-#define V_WRABS _IOW('v',11,struct absio)/* Write a sector to absolute addr */
-#define V_VERIFY _IOWR('v',12,union vfy_io)/* Read verify sector(s) */
-#define V_XFORMAT _IO('v',13) /* Selectively mark sectors as bad */
-#define V_SETPARMS _IOW('v',14,int) /* Set drivers parameters */
-
-
-/*
- * Data structure for the V_VERIFY ioctl
- */
-union vfy_io {
- struct {
- long abs_sec; /* absolute sector number */
- u_short num_sec; /* number of sectors to verify */
- u_short time_flg; /* flag to indicate time the ops */
- }vfy_in;
- struct {
- long deltatime; /* duration of operation */
- u_short err_code; /* reason for failure */
- }vfy_out;
-};
-
-
-/* data structure returned by the Get Parameters ioctl: */
-struct disk_parms {
-/*00*/ char dp_type; /* Disk type (see below) */
- u_char dp_heads; /* Number of heads */
- u_short dp_cyls; /* Number of cylinders */
-/*04*/ u_char dp_sectors; /* Number of sectors/track */
- u_short dp_secsiz; /* Number of bytes/sector */
- /* for this partition: */
-/*08*/ u_short dp_ptag; /* Partition tag */
- u_short dp_pflag; /* Partition flag */
-/*0c*/ long dp_pstartsec; /* Starting absolute sector number */
-/*10*/ long dp_pnumsec; /* Number of sectors */
-/*14*/ u_char dp_dosheads; /* Number of heads */
- u_short dp_doscyls; /* Number of cylinders */
-/*18*/ u_char dp_dossectors; /* Number of sectors/track */
-};
-
-/* Disk types for disk_parms.dp_type: */
-#define DPT_WINI 1 /* Winchester disk */
-#define DPT_FLOPPY 2 /* Floppy */
-#define DPT_OTHER 3 /* Other type of disk */
-#define DPT_NOTDISK 0 /* Not a disk device */
-
-/* Data structure for V_RDABS/V_WRABS ioctl's */
-struct absio {
- long abs_sec; /* Absolute sector number (from 0) */
- char *abs_buf; /* Sector buffer */
-};
-
diff --git a/i386/include/mach/i386/mach_i386_types.h b/i386/include/mach/i386/mach_i386_types.h
index b03c0b06..b0552809 100644
--- a/i386/include/mach/i386/mach_i386_types.h
+++ b/i386/include/mach/i386/mach_i386_types.h
@@ -47,9 +47,9 @@ typedef struct descriptor *descriptor_list_t;
#ifdef MACH_KERNEL
#include <i386/io_perm.h>
-#else
+#else /* MACH_KERNEL */
typedef unsigned short io_port_t;
typedef mach_port_t io_perm_t;
-#endif
+#endif /* MACH_KERNEL */
#endif /* _MACH_MACH_I386_TYPES_H_ */
diff --git a/i386/include/mach/i386/multiboot.h b/i386/include/mach/i386/multiboot.h
index 8f1c47b0..c66ca032 100644
--- a/i386/include/mach/i386/multiboot.h
+++ b/i386/include/mach/i386/multiboot.h
@@ -188,5 +188,110 @@ struct multiboot_mmap
/* usable memory "Type", all others are reserved. */
#define MB_ARD_MEMORY 1
+/*
+ * Copyright (c) 2010, 2012 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/*
+ * Versions used by the biosmem module.
+ */
+
+#include <kern/macros.h>
+
+/*
+ * Magic number provided by the OS to the boot loader.
+ */
+#define MULTIBOOT_OS_MAGIC 0x1badb002
+
+/*
+ * Multiboot flags requesting services from the boot loader.
+ */
+#define MULTIBOOT_OS_MEMORY_INFO 0x2
+
+#define MULTIBOOT_OS_FLAGS MULTIBOOT_OS_MEMORY_INFO
+
+/*
+ * Magic number to identify a multiboot compliant boot loader.
+ */
+#define MULTIBOOT_LOADER_MAGIC 0x2badb002
+
+/*
+ * Multiboot flags set by the boot loader.
+ */
+#define MULTIBOOT_LOADER_MEMORY 0x01
+#define MULTIBOOT_LOADER_CMDLINE 0x04
+#define MULTIBOOT_LOADER_MODULES 0x08
+#define MULTIBOOT_LOADER_SHDR 0x20
+#define MULTIBOOT_LOADER_MMAP 0x40
+
+/*
+ * A multiboot module.
+ */
+struct multiboot_raw_module {
+ uint32_t mod_start;
+ uint32_t mod_end;
+ uint32_t string;
+ uint32_t reserved;
+} __packed;
+
+/*
+ * Memory map entry.
+ */
+struct multiboot_raw_mmap_entry {
+ uint32_t size;
+ uint64_t base_addr;
+ uint64_t length;
+ uint32_t type;
+} __packed;
+
+/*
+ * Multiboot information structure as passed by the boot loader.
+ */
+struct multiboot_raw_info {
+ uint32_t flags;
+ uint32_t mem_lower;
+ uint32_t mem_upper;
+ uint32_t unused0;
+ uint32_t cmdline;
+ uint32_t mods_count;
+ uint32_t mods_addr;
+ uint32_t shdr_num;
+ uint32_t shdr_size;
+ uint32_t shdr_addr;
+ uint32_t shdr_strndx;
+ uint32_t mmap_length;
+ uint32_t mmap_addr;
+ uint32_t unused1[9];
+} __packed;
+
+/*
+ * Versions of the multiboot structures suitable for use with 64-bit pointers.
+ */
+
+struct multiboot_os_module {
+ void *mod_start;
+ void *mod_end;
+ char *string;
+};
+
+struct multiboot_os_info {
+ uint32_t flags;
+ char *cmdline;
+ struct multiboot_module *mods_addr;
+ uint32_t mods_count;
+};
#endif /* _MACH_I386_MULTIBOOT_H_ */
diff --git a/i386/include/mach/i386/rpc.h b/i386/include/mach/i386/rpc.h
deleted file mode 100644
index 71d31fb9..00000000
--- a/i386/include/mach/i386/rpc.h
+++ /dev/null
@@ -1,9 +0,0 @@
-
-struct rpc_csig_action
-{
-};
-
-struct rpc_csig_entry
-{
-};
-
diff --git a/i386/include/mach/i386/vm_param.h b/i386/include/mach/i386/vm_param.h
index 8f708f0f..3a209b83 100644
--- a/i386/include/mach/i386/vm_param.h
+++ b/i386/include/mach/i386/vm_param.h
@@ -29,7 +29,7 @@
* Date: 1985
*
* I386 machine dependent virtual memory parameters.
- * Most of the declarations are preceeded by I386_ (or i386_)
+ * Most of the declarations are preceded by I386_ (or i386_)
* which is OK because only I386 specific code will be using
* them.
*/
diff --git a/i386/include/mach/i386/vm_types.h b/i386/include/mach/i386/vm_types.h
index 1439940b..4a58b1cb 100644
--- a/i386/include/mach/i386/vm_types.h
+++ b/i386/include/mach/i386/vm_types.h
@@ -77,6 +77,15 @@ typedef unsigned long vm_offset_t;
typedef vm_offset_t * vm_offset_array_t;
/*
+ * A type for physical addresses.
+ */
+#ifdef PAE
+typedef unsigned long long phys_addr_t;
+#else /* PAE */
+typedef unsigned long phys_addr_t;
+#endif /* PAE */
+
+/*
* A vm_size_t is the proper type for e.g.
* expressing the difference between two
* vm_offset_t entities.
diff --git a/i386/intel/pmap.c b/i386/intel/pmap.c
index 490c1d95..0771a08d 100644
--- a/i386/intel/pmap.c
+++ b/i386/intel/pmap.c
@@ -83,6 +83,7 @@
#include <i386/proc_reg.h>
#include <i386/locore.h>
#include <i386/model_dep.h>
+#include <i386at/model_dep.h>
#ifdef MACH_PSEUDO_PHYS
#define WRITE_PTE(pte_p, pte_entry) *(pte_p) = pte_entry?pa_to_ma(pte_entry):0;
@@ -166,7 +167,7 @@ vm_offset_t kernel_virtual_end;
#define unlock_pvh_pai(pai) (bit_unlock(pai, pv_lock_table))
/*
- * Array of physical page attribites for managed pages.
+ * Array of physical page attributes for managed pages.
* One byte per physical page.
*/
char *pmap_phys_attributes;
@@ -402,7 +403,7 @@ pmap_t kernel_pmap;
struct kmem_cache pmap_cache; /* cache of pmap structures */
-int pmap_debug = 0; /* flag for debugging prints */
+boolean_t pmap_debug = FALSE; /* flag for debugging prints */
#if 0
int ptes_per_vm_page; /* number of hardware ptes needed
@@ -413,21 +414,20 @@ int ptes_per_vm_page; /* number of hardware ptes needed
unsigned int inuse_ptepages_count = 0; /* debugging */
-extern char end;
-
/*
* Pointer to the basic page directory for the kernel.
* Initialized by pmap_bootstrap().
*/
pt_entry_t *kernel_page_dir;
-void pmap_remove_range(); /* forward */
-#if NCPUS > 1
-void signal_cpus(); /* forward */
-#endif /* NCPUS > 1 */
+/*
+ * Two slots for temporary physical page mapping, to allow for
+ * physical-to-physical transfers.
+ */
+static pmap_mapwindow_t mapwindows[PMAP_NMAPWINDOWS];
static inline pt_entry_t *
-pmap_pde(pmap_t pmap, vm_offset_t addr)
+pmap_pde(const pmap_t pmap, vm_offset_t addr)
{
if (pmap == kernel_pmap)
addr = kvtolin(addr);
@@ -442,7 +442,7 @@ pmap_pde(pmap_t pmap, vm_offset_t addr)
* This is only used internally.
*/
pt_entry_t *
-pmap_pte(pmap_t pmap, vm_offset_t addr)
+pmap_pte(const pmap_t pmap, vm_offset_t addr)
{
pt_entry_t *ptp;
pt_entry_t pte;
@@ -459,10 +459,9 @@ pmap_pte(pmap_t pmap, vm_offset_t addr)
#define DEBUG_PTE_PAGE 0
#if DEBUG_PTE_PAGE
-void ptep_check(ptep)
- ptep_t ptep;
+void ptep_check(ptep_t ptep)
{
- register pt_entry_t *pte, *epte;
+ pt_entry_t *pte, *epte;
int ctu, ctw;
/* check the use and wired counts */
@@ -496,13 +495,13 @@ void ptep_check(ptep)
* For now, VM is already on, we only need to map the
* specified memory.
*/
-vm_offset_t pmap_map(virt, start, end, prot)
- register vm_offset_t virt;
- register vm_offset_t start;
- register vm_offset_t end;
- register int prot;
+vm_offset_t pmap_map(
+ vm_offset_t virt,
+ vm_offset_t start,
+ vm_offset_t end,
+ int prot)
{
- register int ps;
+ int ps;
ps = PAGE_SIZE;
while (start < end) {
@@ -519,15 +518,15 @@ vm_offset_t pmap_map(virt, start, end, prot)
* [phys_first_addr, phys_last_addr) (i.e., devices).
* Otherwise like pmap_map.
*/
-vm_offset_t pmap_map_bd(virt, start, end, prot)
- register vm_offset_t virt;
- register vm_offset_t start;
- register vm_offset_t end;
- vm_prot_t prot;
+vm_offset_t pmap_map_bd(
+ vm_offset_t virt,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t prot)
{
- register pt_entry_t template;
- register pt_entry_t *pte;
- int spl;
+ pt_entry_t template;
+ pt_entry_t *pte;
+ int spl;
#ifdef MACH_PV_PAGETABLES
int n, i = 0;
struct mmu_update update[HYP_BATCH_MMU_UPDATES];
@@ -580,7 +579,7 @@ vm_offset_t pmap_map_bd(virt, start, end, prot)
* and direct-map all physical memory.
* Called with mapping off.
*/
-void pmap_bootstrap()
+void pmap_bootstrap(void)
{
/*
* Mapping is turned off; we must reference only physical addresses.
@@ -781,6 +780,12 @@ void pmap_bootstrap()
}
for (; pte < ptable+NPTES; pte++)
{
+ if (va >= kernel_virtual_end - PMAP_NMAPWINDOWS * PAGE_SIZE && va < kernel_virtual_end)
+ {
+ pmap_mapwindow_t *win = &mapwindows[atop(va - (kernel_virtual_end - PMAP_NMAPWINDOWS * PAGE_SIZE))];
+ win->entry = pte;
+ win->vaddr = va;
+ }
WRITE_PTE(pte, 0);
va += INTEL_PGBYTES;
}
@@ -891,12 +896,41 @@ void pmap_clear_bootstrap_pagetable(pt_entry_t *base) {
}
#endif /* MACH_PV_PAGETABLES */
-void pmap_virtual_space(startp, endp)
- vm_offset_t *startp;
- vm_offset_t *endp;
+/*
+ * Create a temporary mapping for a given physical entry
+ *
+ * This can be used to access physical pages which are not mapped 1:1 by
+ * phystokv().
+ */
+pmap_mapwindow_t *pmap_get_mapwindow(pt_entry_t entry)
+{
+ pmap_mapwindow_t *map;
+
+ /* Find an empty one. */
+ for (map = &mapwindows[0]; map < &mapwindows[sizeof (mapwindows) / sizeof (*mapwindows)]; map++)
+ if (!(*map->entry))
+ break;
+ assert(map < &mapwindows[sizeof (mapwindows) / sizeof (*mapwindows)]);
+
+ WRITE_PTE(map->entry, entry);
+ return map;
+}
+
+/*
+ * Destroy a temporary mapping for a physical entry
+ */
+void pmap_put_mapwindow(pmap_mapwindow_t *map)
+{
+ WRITE_PTE(map->entry, 0);
+ PMAP_UPDATE_TLBS(kernel_pmap, map->vaddr, map->vaddr + PAGE_SIZE);
+}
+
+void pmap_virtual_space(
+ vm_offset_t *startp,
+ vm_offset_t *endp)
{
*startp = kernel_virtual_start;
- *endp = kernel_virtual_end;
+ *endp = kernel_virtual_end - PMAP_NMAPWINDOWS * PAGE_SIZE;
}
/*
@@ -904,11 +938,11 @@ void pmap_virtual_space(startp, endp)
* Called by vm_init, to initialize any structures that the pmap
* system needs to map virtual memory.
*/
-void pmap_init()
+void pmap_init(void)
{
- register long npages;
+ long npages;
vm_offset_t addr;
- register vm_size_t s;
+ vm_size_t s;
#if NCPUS > 1
int i;
#endif /* NCPUS > 1 */
@@ -926,7 +960,7 @@ void pmap_init()
s = round_page(s);
if (kmem_alloc_wired(kernel_map, &addr, s) != KERN_SUCCESS)
panic("pmap_init");
- memset((char *) addr, 0, s);
+ memset((void *) addr, 0, s);
/*
* Allocate the structures first to preserve word-alignment.
@@ -944,9 +978,9 @@ void pmap_init()
* and of the physical-to-virtual entries.
*/
s = (vm_size_t) sizeof(struct pmap);
- kmem_cache_init(&pmap_cache, "pmap", s, 0, NULL, NULL, NULL, 0);
+ kmem_cache_init(&pmap_cache, "pmap", s, 0, NULL, 0);
s = (vm_size_t) sizeof(struct pv_entry);
- kmem_cache_init(&pv_list_cache, "pv_entry", s, 0, NULL, NULL, NULL, 0);
+ kmem_cache_init(&pv_list_cache, "pv_entry", s, 0, NULL, 0);
#if NCPUS > 1
/*
@@ -968,8 +1002,7 @@ void pmap_init()
#define valid_page(x) (pmap_initialized && pmap_valid_page(x))
-boolean_t pmap_verify_free(phys)
- vm_offset_t phys;
+boolean_t pmap_verify_free(vm_offset_t phys)
{
pv_entry_t pv_h;
int pai;
@@ -1003,10 +1036,10 @@ boolean_t pmap_verify_free(phys)
* since these must be unlocked to use vm_page_grab.
*/
vm_offset_t
-pmap_page_table_page_alloc()
+pmap_page_table_page_alloc(void)
{
- register vm_page_t m;
- register vm_offset_t pa;
+ vm_page_t m;
+ vm_offset_t pa;
check_simple_locks();
@@ -1092,8 +1125,7 @@ void pmap_map_mfn(void *_addr, unsigned long mfn) {
* and be removed from its page directory.
*/
void
-pmap_page_table_page_dealloc(pa)
- vm_offset_t pa;
+pmap_page_table_page_dealloc(vm_offset_t pa)
{
vm_page_t m;
@@ -1118,11 +1150,10 @@ pmap_page_table_page_dealloc(pa)
* the map will be used in software only, and
* is bounded by that size.
*/
-pmap_t pmap_create(size)
- vm_size_t size;
+pmap_t pmap_create(vm_size_t size)
{
- register pmap_t p;
- register pmap_statistics_t stats;
+ pmap_t p;
+ pmap_statistics_t stats;
/*
* A software use-only map doesn't even need a map.
@@ -1198,13 +1229,12 @@ pmap_t pmap_create(size)
* no valid mappings.
*/
-void pmap_destroy(p)
- register pmap_t p;
+void pmap_destroy(pmap_t p)
{
- register pt_entry_t *pdep;
- register vm_offset_t pa;
- register int c, s;
- register vm_page_t m;
+ pt_entry_t *pdep;
+ vm_offset_t pa;
+ int c, s;
+ vm_page_t m;
if (p == PMAP_NULL)
return;
@@ -1265,8 +1295,7 @@ void pmap_destroy(p)
* Add a reference to the specified pmap.
*/
-void pmap_reference(p)
- register pmap_t p;
+void pmap_reference(pmap_t p)
{
int s;
if (p != PMAP_NULL) {
@@ -1291,13 +1320,13 @@ void pmap_reference(p)
*/
/* static */
-void pmap_remove_range(pmap, va, spte, epte)
- pmap_t pmap;
- vm_offset_t va;
- pt_entry_t *spte;
- pt_entry_t *epte;
+void pmap_remove_range(
+ pmap_t pmap,
+ vm_offset_t va,
+ pt_entry_t *spte,
+ pt_entry_t *epte)
{
- register pt_entry_t *cpte;
+ pt_entry_t *cpte;
int num_removed, num_unwired;
int pai;
vm_offset_t pa;
@@ -1330,8 +1359,8 @@ void pmap_remove_range(pmap, va, spte, epte)
* Outside range of managed physical memory.
* Just remove the mappings.
*/
- register int i = ptes_per_vm_page;
- register pt_entry_t *lpte = cpte;
+ int i = ptes_per_vm_page;
+ pt_entry_t *lpte = cpte;
do {
#ifdef MACH_PV_PAGETABLES
update[ii].ptr = kv_to_ma(lpte);
@@ -1358,8 +1387,8 @@ void pmap_remove_range(pmap, va, spte, epte)
* Get the modify and reference bits.
*/
{
- register int i;
- register pt_entry_t *lpte;
+ int i;
+ pt_entry_t *lpte;
i = ptes_per_vm_page;
lpte = cpte;
@@ -1388,7 +1417,7 @@ void pmap_remove_range(pmap, va, spte, epte)
* this physical page.
*/
{
- register pv_entry_t pv_h, prev, cur;
+ pv_entry_t pv_h, prev, cur;
pv_h = pai_to_pvh(pai);
if (pv_h->pmap == PMAP_NULL) {
@@ -1447,13 +1476,14 @@ void pmap_remove_range(pmap, va, spte, epte)
* rounded to the hardware page size.
*/
-void pmap_remove(map, s, e)
- pmap_t map;
- vm_offset_t s, e;
+void pmap_remove(
+ pmap_t map,
+ vm_offset_t s,
+ vm_offset_t e)
{
int spl;
- register pt_entry_t *pde;
- register pt_entry_t *spte, *epte;
+ pt_entry_t *pde;
+ pt_entry_t *spte, *epte;
vm_offset_t l;
vm_offset_t _s = s;
@@ -1488,15 +1518,15 @@ void pmap_remove(map, s, e)
* Lower the permission for all mappings to a given
* page.
*/
-void pmap_page_protect(phys, prot)
- vm_offset_t phys;
- vm_prot_t prot;
+void pmap_page_protect(
+ vm_offset_t phys,
+ vm_prot_t prot)
{
pv_entry_t pv_h, prev;
- register pv_entry_t pv_e;
- register pt_entry_t *pte;
+ pv_entry_t pv_e;
+ pt_entry_t *pte;
int pai;
- register pmap_t pmap;
+ pmap_t pmap;
int spl;
boolean_t remove;
@@ -1542,7 +1572,7 @@ void pmap_page_protect(phys, prot)
prev = pv_e = pv_h;
do {
- register vm_offset_t va;
+ vm_offset_t va;
pmap = pv_e->pmap;
/*
@@ -1556,8 +1586,8 @@ void pmap_page_protect(phys, prot)
/*
* Consistency checks.
*/
- /* assert(*pte & INTEL_PTE_VALID); XXX */
- /* assert(pte_to_phys(*pte) == phys); */
+ assert(*pte & INTEL_PTE_VALID);
+ assert(pte_to_pa(*pte) == phys);
/*
* Remove the mapping if new protection is NONE
@@ -1568,10 +1598,10 @@ void pmap_page_protect(phys, prot)
* Remove the mapping, collecting any modify bits.
*/
if (*pte & INTEL_PTE_WIRED)
- panic("pmap_remove_all removing a wired page");
+ panic("pmap_page_protect removing a wired page");
{
- register int i = ptes_per_vm_page;
+ int i = ptes_per_vm_page;
do {
pmap_phys_attributes[pai] |=
@@ -1608,7 +1638,7 @@ void pmap_page_protect(phys, prot)
/*
* Write-protect.
*/
- register int i = ptes_per_vm_page;
+ int i = ptes_per_vm_page;
do {
#ifdef MACH_PV_PAGETABLES
@@ -1651,14 +1681,15 @@ void pmap_page_protect(phys, prot)
* specified range of this map as requested.
* Will not increase permissions.
*/
-void pmap_protect(map, s, e, prot)
- pmap_t map;
- vm_offset_t s, e;
- vm_prot_t prot;
+void pmap_protect(
+ pmap_t map,
+ vm_offset_t s,
+ vm_offset_t e,
+ vm_prot_t prot)
{
- register pt_entry_t *pde;
- register pt_entry_t *spte, *epte;
- vm_offset_t l;
+ pt_entry_t *pde;
+ pt_entry_t *spte, *epte;
+ vm_offset_t l;
int spl;
vm_offset_t _s = s;
@@ -1757,16 +1788,16 @@ void pmap_protect(map, s, e, prot)
* or lose information. That is, this routine must actually
* insert this page into the given map NOW.
*/
-void pmap_enter(pmap, v, pa, prot, wired)
- register pmap_t pmap;
- vm_offset_t v;
- register vm_offset_t pa;
- vm_prot_t prot;
- boolean_t wired;
+void pmap_enter(
+ pmap_t pmap,
+ vm_offset_t v,
+ vm_offset_t pa,
+ vm_prot_t prot,
+ boolean_t wired)
{
- register pt_entry_t *pte;
- register pv_entry_t pv_h;
- register int i, pai;
+ pt_entry_t *pte;
+ pv_entry_t pv_h;
+ int i, pai;
pv_entry_t pv_e;
pt_entry_t template;
int spl;
@@ -2059,14 +2090,14 @@ Retry:
* In/out conditions:
* The mapping must already exist in the pmap.
*/
-void pmap_change_wiring(map, v, wired)
- register pmap_t map;
- vm_offset_t v;
- boolean_t wired;
+void pmap_change_wiring(
+ pmap_t map,
+ vm_offset_t v,
+ boolean_t wired)
{
- register pt_entry_t *pte;
- register int i;
- int spl;
+ pt_entry_t *pte;
+ int i;
+ int spl;
/*
* We must grab the pmap system lock because we may
@@ -2114,13 +2145,13 @@ void pmap_change_wiring(map, v, wired)
* with the given map/virtual_address pair.
*/
-vm_offset_t pmap_extract(pmap, va)
- register pmap_t pmap;
- vm_offset_t va;
+vm_offset_t pmap_extract(
+ pmap_t pmap,
+ vm_offset_t va)
{
- register pt_entry_t *pte;
- register vm_offset_t pa;
- int spl;
+ pt_entry_t *pte;
+ vm_offset_t pa;
+ int spl;
SPLVM(spl);
simple_lock(&pmap->lock);
@@ -2150,9 +2181,6 @@ void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
vm_size_t len;
vm_offset_t src_addr;
{
-#ifdef lint
- dst_pmap++; src_pmap++; dst_addr++; len++; src_addr++;
-#endif /* lint */
}
#endif /* 0 */
@@ -2167,10 +2195,9 @@ void pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr)
* Usage:
* Called by the pageout daemon when pages are scarce.
*/
-void pmap_collect(p)
- pmap_t p;
+void pmap_collect(pmap_t p)
{
- register pt_entry_t *pdp, *ptp;
+ pt_entry_t *pdp, *ptp;
pt_entry_t *eptp;
vm_offset_t pa;
int spl, wired;
@@ -2201,7 +2228,7 @@ void pmap_collect(p)
*/
wired = 0;
{
- register pt_entry_t *ptep;
+ pt_entry_t *ptep;
for (ptep = ptp; ptep < eptp; ptep++) {
if (*ptep & INTEL_PTE_WIRED) {
wired = 1;
@@ -2227,8 +2254,8 @@ void pmap_collect(p)
* Invalidate the page directory pointer.
*/
{
- register int i = ptes_per_vm_page;
- register pt_entry_t *pdep = pdp;
+ int i = ptes_per_vm_page;
+ pt_entry_t *pdep = pdp;
do {
#ifdef MACH_PV_PAGETABLES
unsigned long pte = *pdep;
@@ -2250,7 +2277,7 @@ void pmap_collect(p)
* And free the pte page itself.
*/
{
- register vm_page_t m;
+ vm_page_t m;
vm_object_lock(pmap_object);
m = vm_page_lookup(pmap_object, pa);
@@ -2303,9 +2330,6 @@ void pmap_deactivate(pmap, th, which_cpu)
thread_t th;
int which_cpu;
{
-#ifdef lint
- pmap++; th++; which_cpu++;
-#endif /* lint */
PMAP_DEACTIVATE(pmap, th, which_cpu);
}
#endif /* 0 */
@@ -2378,30 +2402,27 @@ pmap_copy_page(src, dst)
* down (or not) as appropriate.
*/
void
-pmap_pageable(pmap, start, end, pageable)
- pmap_t pmap;
- vm_offset_t start;
- vm_offset_t end;
- boolean_t pageable;
+pmap_pageable(
+ pmap_t pmap,
+ vm_offset_t start,
+ vm_offset_t end,
+ boolean_t pageable)
{
-#ifdef lint
- pmap++; start++; end++; pageable++;
-#endif /* lint */
}
/*
* Clear specified attribute bits.
*/
void
-phys_attribute_clear(phys, bits)
- vm_offset_t phys;
- int bits;
+phys_attribute_clear(
+ vm_offset_t phys,
+ int bits)
{
pv_entry_t pv_h;
- register pv_entry_t pv_e;
- register pt_entry_t *pte;
+ pv_entry_t pv_e;
+ pt_entry_t *pte;
int pai;
- register pmap_t pmap;
+ pmap_t pmap;
int spl;
assert(phys != vm_page_fictitious_addr);
@@ -2432,7 +2453,7 @@ phys_attribute_clear(phys, bits)
* There are some mappings.
*/
for (pv_e = pv_h; pv_e != PV_ENTRY_NULL; pv_e = pv_e->next) {
- register vm_offset_t va;
+ vm_offset_t va;
pmap = pv_e->pmap;
/*
@@ -2443,19 +2464,17 @@ phys_attribute_clear(phys, bits)
va = pv_e->va;
pte = pmap_pte(pmap, va);
-#if 0
/*
* Consistency checks.
*/
assert(*pte & INTEL_PTE_VALID);
- /* assert(pte_to_phys(*pte) == phys); */
-#endif
+ assert(pte_to_pa(*pte) == phys);
/*
* Clear modify or reference bits.
*/
{
- register int i = ptes_per_vm_page;
+ int i = ptes_per_vm_page;
do {
#ifdef MACH_PV_PAGETABLES
if (!(hyp_mmu_update_pte(kv_to_ma(pte), *pte & ~bits)))
@@ -2479,15 +2498,15 @@ phys_attribute_clear(phys, bits)
* Check specified attribute bits.
*/
boolean_t
-phys_attribute_test(phys, bits)
- vm_offset_t phys;
- int bits;
+phys_attribute_test(
+ vm_offset_t phys,
+ int bits)
{
pv_entry_t pv_h;
- register pv_entry_t pv_e;
- register pt_entry_t *pte;
+ pv_entry_t pv_e;
+ pt_entry_t *pte;
int pai;
- register pmap_t pmap;
+ pmap_t pmap;
int spl;
assert(phys != vm_page_fictitious_addr);
@@ -2531,25 +2550,23 @@ phys_attribute_test(phys, bits)
simple_lock(&pmap->lock);
{
- register vm_offset_t va;
+ vm_offset_t va;
va = pv_e->va;
pte = pmap_pte(pmap, va);
-#if 0
/*
* Consistency checks.
*/
assert(*pte & INTEL_PTE_VALID);
- /* assert(pte_to_phys(*pte) == phys); */
-#endif
+ assert(pte_to_pa(*pte) == phys);
}
/*
* Check modify or reference bits.
*/
{
- register int i = ptes_per_vm_page;
+ int i = ptes_per_vm_page;
do {
if (*pte & bits) {
@@ -2570,8 +2587,7 @@ phys_attribute_test(phys, bits)
* Clear the modify bits on the specified physical page.
*/
-void pmap_clear_modify(phys)
- register vm_offset_t phys;
+void pmap_clear_modify(vm_offset_t phys)
{
phys_attribute_clear(phys, PHYS_MODIFIED);
}
@@ -2583,8 +2599,7 @@ void pmap_clear_modify(phys)
* by any physical maps.
*/
-boolean_t pmap_is_modified(phys)
- register vm_offset_t phys;
+boolean_t pmap_is_modified(vm_offset_t phys)
{
return (phys_attribute_test(phys, PHYS_MODIFIED));
}
@@ -2595,8 +2610,7 @@ boolean_t pmap_is_modified(phys)
* Clear the reference bit on the specified physical page.
*/
-void pmap_clear_reference(phys)
- vm_offset_t phys;
+void pmap_clear_reference(vm_offset_t phys)
{
phys_attribute_clear(phys, PHYS_REFERENCED);
}
@@ -2608,8 +2622,7 @@ void pmap_clear_reference(phys)
* by any physical maps.
*/
-boolean_t pmap_is_referenced(phys)
- vm_offset_t phys;
+boolean_t pmap_is_referenced(vm_offset_t phys)
{
return (phys_attribute_test(phys, PHYS_REFERENCED));
}
@@ -2678,13 +2691,14 @@ boolean_t pmap_is_referenced(phys)
/*
* Signal another CPU that it must flush its TLB
*/
-void signal_cpus(use_list, pmap, start, end)
- cpu_set use_list;
- pmap_t pmap;
- vm_offset_t start, end;
+void signal_cpus(
+ cpu_set use_list,
+ pmap_t pmap,
+ vm_offset_t start,
+ vm_offset_t end)
{
- register int which_cpu, j;
- register pmap_update_list_t update_list_p;
+ int which_cpu, j;
+ pmap_update_list_t update_list_p;
while ((which_cpu = ffs(use_list)) != 0) {
which_cpu -= 1; /* convert to 0 origin */
@@ -2717,13 +2731,12 @@ void signal_cpus(use_list, pmap, start, end)
}
}
-void process_pmap_updates(my_pmap)
- register pmap_t my_pmap;
+void process_pmap_updates(pmap_t my_pmap)
{
- register int my_cpu = cpu_number();
- register pmap_update_list_t update_list_p;
- register int j;
- register pmap_t pmap;
+ int my_cpu = cpu_number();
+ pmap_update_list_t update_list_p;
+ int j;
+ pmap_t pmap;
update_list_p = &cpu_update_list[my_cpu];
simple_lock(&update_list_p->lock);
@@ -2748,9 +2761,9 @@ void process_pmap_updates(my_pmap)
*/
void pmap_update_interrupt(void)
{
- register int my_cpu;
- register pmap_t my_pmap;
- int s;
+ int my_cpu;
+ pmap_t my_pmap;
+ int s;
my_cpu = cpu_number();
@@ -2806,7 +2819,7 @@ void pmap_update_interrupt(void)
/*
* Dummy routine to satisfy external reference.
*/
-void pmap_update_interrupt()
+void pmap_update_interrupt(void)
{
/* should never be called. */
}
@@ -2815,10 +2828,11 @@ void pmap_update_interrupt()
#if defined(__i386__)
/* Unmap page 0 to trap NULL references. */
void
-pmap_unmap_page_zero ()
+pmap_unmap_page_zero (void)
{
int *pte;
+ printf("Unmapping the zero page. Some BIOS functions may not be working any more.\n");
pte = (int *) pmap_pte (kernel_pmap, 0);
if (!pte)
return;
diff --git a/i386/intel/pmap.h b/i386/intel/pmap.h
index 93293e3d..382cd5f4 100644
--- a/i386/intel/pmap.h
+++ b/i386/intel/pmap.h
@@ -192,6 +192,16 @@ extern void pmap_clear_bootstrap_pagetable(pt_entry_t *addr);
#define set_pmap(pmap) set_cr3(kvtophys((vm_offset_t)(pmap)->dirbase))
#endif /* PAE */
+typedef struct {
+ pt_entry_t *entry;
+ vm_offset_t vaddr;
+} pmap_mapwindow_t;
+
+extern pmap_mapwindow_t *pmap_get_mapwindow(pt_entry_t entry);
+extern void pmap_put_mapwindow(pmap_mapwindow_t *map);
+
+#define PMAP_NMAPWINDOWS 2
+
#if NCPUS > 1
/*
* List of cpus that are actively using mapped memory. Any
@@ -227,7 +237,7 @@ extern pmap_t kernel_pmap;
* Machine dependent routines that are used only for i386/i486.
*/
-pt_entry_t *pmap_pte(pmap_t pmap, vm_offset_t addr);
+pt_entry_t *pmap_pte(const pmap_t pmap, vm_offset_t addr);
/*
* Macros for speed.
@@ -282,7 +292,7 @@ pt_entry_t *pmap_pte(pmap_t pmap, vm_offset_t addr);
}
#define PMAP_ACTIVATE_USER(pmap, th, my_cpu) { \
- register pmap_t tpmap = (pmap); \
+ pmap_t tpmap = (pmap); \
\
if (tpmap == kernel_pmap) { \
/* \
@@ -324,7 +334,7 @@ pt_entry_t *pmap_pte(pmap_t pmap, vm_offset_t addr);
}
#define PMAP_DEACTIVATE_USER(pmap, thread, my_cpu) { \
- register pmap_t tpmap = (pmap); \
+ pmap_t tpmap = (pmap); \
\
/* \
* Do nothing if this is the kernel pmap. \
@@ -395,7 +405,7 @@ pt_entry_t *pmap_pte(pmap_t pmap, vm_offset_t addr);
}
#define PMAP_ACTIVATE_USER(pmap, th, my_cpu) { \
- register pmap_t tpmap = (pmap); \
+ pmap_t tpmap = (pmap); \
(void) (th); \
(void) (my_cpu); \
\
@@ -451,6 +461,20 @@ extern void pmap_copy_page (vm_offset_t, vm_offset_t);
*/
extern vm_offset_t kvtophys (vm_offset_t);
+void pmap_remove_range(
+ pmap_t pmap,
+ vm_offset_t va,
+ pt_entry_t *spte,
+ pt_entry_t *epte);
+
+#if NCPUS > 1
+void signal_cpus(
+ cpu_set use_list,
+ pmap_t pmap,
+ vm_offset_t start,
+ vm_offset_t end);
+#endif /* NCPUS > 1 */
+
#endif /* __ASSEMBLER__ */
#endif /* _PMAP_MACHINE_ */
diff --git a/i386/intel/read_fault.c b/i386/intel/read_fault.c
index 762f60da..4b1edce3 100644
--- a/i386/intel/read_fault.c
+++ b/i386/intel/read_fault.c
@@ -31,7 +31,7 @@
#include <vm/vm_page.h>
#include <vm/pmap.h>
-#include <kern/macro_help.h>
+#include <kern/macros.h>
/*
* Expansion of vm_fault for read fault in kernel mode.
@@ -39,9 +39,9 @@
* ignores write protection in kernel mode.
*/
kern_return_t
-intel_read_fault(map, vaddr)
- vm_map_t map;
- vm_offset_t vaddr;
+intel_read_fault(
+ vm_map_t map,
+ vm_offset_t vaddr)
{
vm_map_version_t version; /* Map version for
verification */
@@ -52,7 +52,7 @@ intel_read_fault(map, vaddr)
vm_page_t top_page; /* Placeholder page */
boolean_t wired; /* Is map region wired? */
kern_return_t result;
- register vm_page_t m;
+ vm_page_t m;
RetryFault:
diff --git a/include/cache.h b/include/cache.h
new file mode 100644
index 00000000..6260366a
--- /dev/null
+++ b/include/cache.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _MACH_CACHE_H_
+#define _MACH_CACHE_H_
+
+/* This macro can be used to align statically allocated objects so
+ that they start at a cache line. */
+#define __cacheline_aligned __attribute__((aligned(1 << CPU_L1_SHIFT)))
+
+#endif /* _MACH_CACHE_H_ */
diff --git a/include/device/device.defs b/include/device/device.defs
index d9234e39..409146f5 100644
--- a/include/device/device.defs
+++ b/include/device/device.defs
@@ -45,14 +45,29 @@ subsystem
serverprefix ds_;
type reply_port_t = MACH_MSG_TYPE_MAKE_SEND_ONCE | polymorphic
- ctype: mach_port_t;
+ ctype: mach_port_t
+#ifndef KERNEL_SERVER
+#ifdef MACH_PAYLOAD_TO_PORT
+ intranpayload: mach_port_t MACH_PAYLOAD_TO_PORT
+#endif /* MACH_PAYLOAD_TO_PORT */
+#endif /* KERNEL_SERVER */
+;
routine device_open(
master_port : mach_port_t;
sreplyport reply_port : reply_port_t;
mode : dev_mode_t;
name : dev_name_t;
- out device : device_t
+ out device : device_t =
+ MACH_MSG_TYPE_PORT_SEND
+ ctype: mach_port_t
+#if KERNEL_SERVER
+ outtran: mach_port_t convert_device_to_port(device_t)
+#else
+#ifdef DEVICE_OUTTRAN
+ outtran: DEVICE_OUTTRAN
+#endif
+#endif /* KERNEL_SERVER */
);
routine device_close(
diff --git a/include/device/device_reply.defs b/include/device/device_reply.defs
index 34156776..5a325075 100644
--- a/include/device/device_reply.defs
+++ b/include/device/device_reply.defs
@@ -54,7 +54,13 @@ serverdemux seqnos_device_reply_server;
#endif /* SEQNOS */
type reply_port_t = polymorphic|MACH_MSG_TYPE_PORT_SEND_ONCE
- ctype: mach_port_t;
+ ctype: mach_port_t
+#ifndef KERNEL_SERVER
+#ifdef MACH_PAYLOAD_TO_PORT
+ intranpayload: mach_port_t MACH_PAYLOAD_TO_PORT
+#endif /* MACH_PAYLOAD_TO_PORT */
+#endif /* KERNEL_SERVER */
+;
simpleroutine device_open_reply(
reply_port : reply_port_t;
diff --git a/include/device/device_request.defs b/include/device/device_request.defs
index e8aab2a6..7ea8637c 100644
--- a/include/device/device_request.defs
+++ b/include/device/device_request.defs
@@ -37,7 +37,13 @@ subsystem device_request 2800; /* to match device.defs */
serverprefix ds_;
type reply_port_t = MACH_MSG_TYPE_MAKE_SEND_ONCE
- ctype: mach_port_t;
+ ctype: mach_port_t
+#ifndef KERNEL_SERVER
+#ifdef MACH_PAYLOAD_TO_PORT
+ intranpayload: mach_port_t MACH_PAYLOAD_TO_PORT
+#endif /* MACH_PAYLOAD_TO_PORT */
+#endif /* KERNEL_SERVER */
+;
simpleroutine device_open_request(
device_server_port : mach_port_t;
diff --git a/include/device/device_types.defs b/include/device/device_types.defs
index 79e4c5b0..49cc2717 100644
--- a/include/device/device_types.defs
+++ b/include/device/device_types.defs
@@ -39,6 +39,10 @@
#include <mach/std_types.defs>
+#ifdef DEVICE_IMPORTS
+DEVICE_IMPORTS
+#endif
+
type recnum_t = unsigned32;
type dev_mode_t = unsigned32;
type dev_flavor_t = unsigned32;
@@ -55,6 +59,19 @@ type device_t = mach_port_t
intran: device_t dev_port_lookup(mach_port_t)
outtran: mach_port_t convert_device_to_port(device_t)
destructor: device_deallocate(device_t)
+#else /* KERNEL_SERVER */
+#ifdef DEVICE_INTRAN
+ intran: DEVICE_INTRAN
+#endif
+#ifdef DEVICE_INTRAN_PAYLOAD
+ intranpayload: DEVICE_INTRAN_PAYLOAD
+#endif
+#ifdef DEVICE_OUTTRAN
+ outtran: DEVICE_OUTTRAN
+#endif
+#ifdef DEVICE_DESTRUCTOR
+ destructor: DEVICE_DESTRUCTOR
+#endif
#endif /* KERNEL_SERVER */
;
diff --git a/include/device/device_types.h b/include/device/device_types.h
index caf4fc04..a6db051e 100644
--- a/include/device/device_types.h
+++ b/include/device/device_types.h
@@ -135,4 +135,6 @@ typedef int io_return_t;
#define D_NO_MEMORY 2508 /* memory allocation failure */
#define D_READ_ONLY 2509 /* device cannot be written to */
+void device_deallocate(device_t);
+
#endif /* DEVICE_TYPES_H */
diff --git a/include/device/tape_status.h b/include/device/tape_status.h
index 97cb098a..603d76c5 100644
--- a/include/device/tape_status.h
+++ b/include/device/tape_status.h
@@ -137,4 +137,4 @@ struct mtget {
#define MTIOCEEOT _IO('m', 4) /* enable EOT error */
-#endif _TAPE_STATUS_H_
+#endif /* _TAPE_STATUS_H_ */
diff --git a/include/device/tty_status.h b/include/device/tty_status.h
index 15249a40..2eed5d03 100644
--- a/include/device/tty_status.h
+++ b/include/device/tty_status.h
@@ -30,6 +30,9 @@
* Status information for tty.
*/
+#ifndef _DEVICE_TTY_STATUS_H_
+#define _DEVICE_TTY_STATUS_H_
+
struct tty_status {
int tt_ispeed; /* input speed */
int tt_ospeed; /* output speed */
@@ -127,3 +130,5 @@ struct tty_status {
/* clear break condition */
#define TTY_SET_TRANSLATION (dev_flavor_t)(('t'<<16) + 8)
/* set translation table */
+
+#endif /* _DEVICE_TTY_STATUS_H_ */
diff --git a/include/mach/alert.h b/include/mach/alert.h
index 8232f9ef..e8eb3713 100644
--- a/include/mach/alert.h
+++ b/include/mach/alert.h
@@ -34,4 +34,4 @@
#define ALERT_USER 0xffff0000 /* User-defined alert bits */
-#endif _MACH_ALERT_H_
+#endif /* _MACH_ALERT_H_ */
diff --git a/include/mach/boot.h b/include/mach/boot.h
index d3e141fa..7f14cc4b 100644
--- a/include/mach/boot.h
+++ b/include/mach/boot.h
@@ -65,7 +65,7 @@ struct boot_rendezvous
int code;
};
-#endif !__ASSEMBLER__
+#endif /* !__ASSEMBLER__ */
/* This is the magic value that must appear in boot_module.magic. */
@@ -90,4 +90,4 @@ struct boot_rendezvous
#define BRZV_DATA 'D'
-#endif _MACH_BOOT_
+#endif /* _MACH_BOOT_ */
diff --git a/include/mach/default_pager_types.defs b/include/mach/default_pager_types.defs
index 3164f047..bee7c259 100644
--- a/include/mach/default_pager_types.defs
+++ b/include/mach/default_pager_types.defs
@@ -41,4 +41,4 @@ type default_pager_filename_t = (MACH_MSG_TYPE_STRING_C, 8*256);
import <mach/default_pager_types.h>;
-#endif _MACH_DEFAULT_PAGER_TYPES_DEFS_
+#endif /* _MACH_DEFAULT_PAGER_TYPES_DEFS_ */
diff --git a/include/mach/default_pager_types.h b/include/mach/default_pager_types.h
index 99e43ce3..f5ce5a4f 100644
--- a/include/mach/default_pager_types.h
+++ b/include/mach/default_pager_types.h
@@ -55,4 +55,4 @@ typedef default_pager_page_t *default_pager_page_array_t;
typedef char default_pager_filename_t[256];
-#endif _MACH_DEFAULT_PAGER_TYPES_H_
+#endif /* _MACH_DEFAULT_PAGER_TYPES_H_ */
diff --git a/include/mach/gnumach.defs b/include/mach/gnumach.defs
index 73313343..dd4da870 100644
--- a/include/mach/gnumach.defs
+++ b/include/mach/gnumach.defs
@@ -27,6 +27,11 @@ subsystem
#include <mach/std_types.defs>
#include <mach/mach_types.defs>
+#include <mach_debug/mach_debug_types.defs>
+
+#ifdef GNUMACH_IMPORTS
+GNUMACH_IMPORTS
+#endif
type vm_cache_statistics_data_t = struct[11] of integer_t;
@@ -37,3 +42,45 @@ type vm_cache_statistics_data_t = struct[11] of integer_t;
routine vm_cache_statistics(
target_task : vm_task_t;
out vm_cache_stats : vm_cache_statistics_data_t);
+
+/*
+ * Terminate a thread and release rights and memory.
+ *
+ * Intended to be used by threading libraries to provide a clean way for
+ * threads to terminate themselves. The resources a thread wouldn't be able
+ * to release without this call when terminating itself are its
+ * last reference to its kernel port, its reply port, and its stack.
+ *
+ * This call is semantically equivalent to :
+ * - mach_port_deallocate(task, thread_name);
+ * - if (reply_port != MACH_PORT_NULL)
+ * mach_port_destroy(task, reply_port);
+ * - if ((address != 0) || (size != 0))
+ * vm_deallocate(task, address, size)
+ * - thread_terminate(thread)
+ *
+ * Implemented as a simple routine so a reply port isn't required.
+ */
+simpleroutine thread_terminate_release(
+ thread : thread_t;
+ task : task_t;
+ thread_name : mach_port_name_t;
+ reply_port : mach_port_name_t;
+ address : vm_address_t;
+ size : vm_size_t);
+
+/*
+ * Set the name of task TASK to NAME. This is a debugging aid.
+ * NAME will be used in error messages printed by the kernel.
+ */
+simpleroutine task_set_name(
+ task : task_t;
+ name : kernel_debug_name_t);
+
+/*
+ * Register a port to which a notification about newly created tasks
+ * are sent.
+ */
+routine register_new_task_notification(
+ host_priv : host_priv_t;
+ notification : mach_port_send_t);
diff --git a/include/mach/mach.defs b/include/mach/mach.defs
index 58510805..20dc8637 100644
--- a/include/mach/mach.defs
+++ b/include/mach/mach.defs
@@ -46,6 +46,10 @@ userprefix r_;
#include <mach/std_types.defs>
#include <mach/mach_types.defs>
+#ifdef MACH_IMPORTS
+MACH_IMPORTS
+#endif
+
skip; /* old port_allocate */
skip; /* old port_deallocate */
skip; /* old port_enable */
@@ -54,6 +58,9 @@ skip; /* old port_select */
skip; /* old port_set_backlog */
skip; /* old port_status */
+/* We use only a handful of RPCs as client. Skip the rest. */
+#if ! KERNEL_USER
+
/*
* Create a new task with an empty set of IPC rights,
* and having an address space constructed from the
@@ -306,6 +313,18 @@ skip; /* old u*x_pid */
skip; /* old netipc_listen */
skip; /* old netipc_ignore */
+#else /* ! KERNEL_USER */
+
+skip; skip; skip; skip; skip;
+skip; skip; skip; skip; skip;
+skip; skip; skip; skip; skip;
+skip; skip; skip; skip; skip;
+skip; skip; skip; skip; skip;
+skip; skip; skip; skip; skip;
+skip;
+
+#endif /* ! KERNEL_USER */
+
/*
* Provide the data contents of a range of the given memory
* object, with the access restriction specified. [Only
@@ -347,6 +366,8 @@ routine memory_object_get_attributes(
out may_cache : boolean_t;
out copy_strategy : memory_object_copy_strategy_t);
+#if ! KERNEL_USER
+
/*
* Sets the default memory manager, the port to which
* newly-created temporary memory objects are delivered.
@@ -357,6 +378,12 @@ routine vm_set_default_memory_manager(
host_priv : host_priv_t;
inout default_manager : mach_port_make_send_t);
+#else /* ! KERNEL_USER */
+
+skip;
+
+#endif /* ! KERNEL_USER */
+
skip; /* old pager_flush_request */
/*
@@ -409,6 +436,8 @@ skip; /* old netport_enter */
skip; /* old netport_remove */
skip; /* old thread_set_priority */
+#if ! KERNEL_USER
+
/*
* Increment the suspend count for the target task.
* No threads within a task may run when the suspend
@@ -609,6 +638,18 @@ routine vm_map(
inheritance : vm_inherit_t);
#endif /* EMULATOR */
+#else /* ! KERNEL_USER */
+
+skip; skip; skip; skip; skip;
+skip; skip; skip; skip; skip;
+skip; skip; skip; skip; skip;
+skip; skip; skip; skip; skip;
+skip; skip; skip; skip; skip;
+skip; skip; skip; skip; skip;
+skip; skip; skip; skip;
+
+#endif /* ! KERNEL_USER */
+
/*
* Indicate that a range of the specified memory object cannot
* be provided at this time. [Threads waiting for memory pages
@@ -680,6 +721,8 @@ simpleroutine memory_object_change_attributes(
reply_to : mach_port_t =
MACH_MSG_TYPE_MAKE_SEND_ONCE|polymorphic);
+#if ! KERNEL_USER
+
skip; /* old host_callout_statistics_reset */
skip; /* old port_set_select */
skip; /* old port_set_backup */
@@ -698,6 +741,8 @@ routine vm_machine_attribute(
skip; /* old host_fpa_counters_reset */
+#endif /* ! KERNEL_USER */
+
/*
* There is no more room in this interface for additional calls.
*/
diff --git a/include/mach/mach_host.defs b/include/mach/mach_host.defs
index 2644146f..28439a01 100644
--- a/include/mach/mach_host.defs
+++ b/include/mach/mach_host.defs
@@ -47,6 +47,10 @@ subsystem
#include <mach/std_types.defs>
#include <mach/mach_types.defs>
+#ifdef MACH_HOST_IMPORTS
+MACH_HOST_IMPORTS
+#endif
+
/*
* Get list of processors on this host.
*/
@@ -292,7 +296,7 @@ routine host_reboot(
* [ To unwire the pages, specify VM_PROT_NONE. ]
*/
routine vm_wire(
- host_priv : host_priv_t;
+ host : mach_port_t;
task : vm_task_t;
address : vm_address_t;
size : vm_size_t;
diff --git a/include/mach/mach_port.defs b/include/mach/mach_port.defs
index e1f45e3c..c21c34bc 100644
--- a/include/mach/mach_port.defs
+++ b/include/mach/mach_port.defs
@@ -176,14 +176,7 @@ routine mach_port_mod_refs(
right : mach_port_right_t;
delta : mach_port_delta_t);
-/*
- * Temporary compatibility call.
- */
-
-routine old_mach_port_get_receive_status(
- task : ipc_space_t;
- name : mach_port_name_t;
- out status : old_mach_port_status_t);
+skip; /* old old_mach_port_get_receive_status */
/*
* Only valid for receive rights.
@@ -342,5 +335,28 @@ routine mach_port_create_act(
user_rbuf_size : vm_size_t;
out new_act : thread_t);
+#else /* MIGRATING_THREADS */
+
+skip; /* mach_port_set_rpcinfo */
+skip; /* mach_port_create_act */
+
#endif /* MIGRATING_THREADS */
+/*
+ * Only valid for receive rights.
+ * Set the protected payload for this right to the given value.
+ */
+
+routine mach_port_set_protected_payload(
+ task : ipc_space_t;
+ name : mach_port_name_t;
+ payload : natural_t);
+
+/*
+ * Only valid for receive rights.
+ * Clear the protected payload for this right.
+ */
+
+routine mach_port_clear_protected_payload(
+ task : ipc_space_t;
+ name : mach_port_name_t);
diff --git a/include/mach/mach_types.defs b/include/mach/mach_types.defs
index 607d5d92..8e68d385 100644
--- a/include/mach/mach_types.defs
+++ b/include/mach/mach_types.defs
@@ -60,8 +60,6 @@ serverprefix SERVERPREFIX;
type mach_port_status_t = struct[9] of integer_t;
-type old_mach_port_status_t = struct[8] of integer_t; /* compatibility */
-
type task_t = mach_port_t
ctype: mach_port_t
#if KERNEL_SERVER
@@ -135,6 +133,19 @@ type memory_object_t = mach_port_t
ctype: mach_port_t
#if KERNEL_SERVER
intran: ipc_port_t null_conversion(mach_port_t)
+#else /* KERNEL_SERVER */
+#ifdef MEMORY_OBJECT_INTRAN
+ intran: MEMORY_OBJECT_INTRAN
+#endif
+#ifdef MEMORY_OBJECT_INTRAN_PAYLOAD
+ intranpayload: MEMORY_OBJECT_INTRAN_PAYLOAD
+#endif
+#ifdef MEMORY_OBJECT_OUTTRAN
+ outtran: MEMORY_OBJECT_OUTTRAN
+#endif
+#ifdef MEMORY_OBJECT_DESTRUCTOR
+ destructor: MEMORY_OBJECT_DESTRUCTOR
+#endif
#endif /* KERNEL_SERVER */
;
diff --git a/include/mach/memory_object.defs b/include/mach/memory_object.defs
index ea7989aa..6372ded8 100644
--- a/include/mach/memory_object.defs
+++ b/include/mach/memory_object.defs
@@ -42,6 +42,10 @@ subsystem
#include <mach/std_types.defs>
#include <mach/mach_types.defs>
+#ifdef MEMORY_OBJECT_IMPORTS
+MEMORY_OBJECT_IMPORTS
+#endif
+
#if SEQNOS
serverprefix seqnos_;
serverdemux seqnos_memory_object_server;
@@ -85,7 +89,18 @@ simpleroutine memory_object_init(
simpleroutine memory_object_terminate(
memory_object : memory_object_t =
MACH_MSG_TYPE_MOVE_SEND
- ctype: mach_port_t;
+ ctype: mach_port_t
+#ifdef MEMORY_OBJECT_INTRAN
+ intran: MEMORY_OBJECT_INTRAN
+#endif
+#ifdef MEMORY_OBJECT_INTRAN_PAYLOAD
+ intranpayload:
+ MEMORY_OBJECT_INTRAN_PAYLOAD
+#endif
+#ifdef MEMORY_OBJECT_DESTRUCTOR
+ destructor: MEMORY_OBJECT_DESTRUCTOR
+#endif
+ ;
#if SEQNOS
msgseqno seqno : mach_port_seqno_t;
#endif /* SEQNOS */
@@ -221,7 +236,17 @@ simpleroutine memory_object_data_write(
simpleroutine memory_object_lock_completed(
memory_object : memory_object_t =
polymorphic|MACH_MSG_TYPE_PORT_SEND_ONCE
- ctype: mach_port_t;
+ ctype: mach_port_t
+#ifdef MEMORY_OBJECT_INTRAN
+ intran: MEMORY_OBJECT_INTRAN
+#endif
+#ifdef MEMORY_OBJECT_INTRAN_PAYLOAD
+ intranpayload: MEMORY_OBJECT_INTRAN_PAYLOAD
+#endif
+#ifdef MEMORY_OBJECT_DESTRUCTOR
+ destructor: MEMORY_OBJECT_DESTRUCTOR
+#endif
+ ;
#if SEQNOS
msgseqno seqno : mach_port_seqno_t;
#endif /* SEQNOS */
@@ -252,7 +277,17 @@ simpleroutine memory_object_lock_completed(
simpleroutine memory_object_supply_completed(
memory_object : memory_object_t =
polymorphic|MACH_MSG_TYPE_PORT_SEND_ONCE
- ctype: mach_port_t;
+ ctype: mach_port_t
+#ifdef MEMORY_OBJECT_INTRAN
+ intran: MEMORY_OBJECT_INTRAN
+#endif
+#ifdef MEMORY_OBJECT_INTRAN_PAYLOAD
+ intranpayload: MEMORY_OBJECT_INTRAN_PAYLOAD
+#endif
+#ifdef MEMORY_OBJECT_DESTRUCTOR
+ destructor: MEMORY_OBJECT_DESTRUCTOR
+#endif
+ ;
#if SEQNOS
msgseqno seqno : mach_port_seqno_t;
#endif /* SEQNOS */
@@ -298,7 +333,17 @@ simpleroutine memory_object_data_return(
simpleroutine memory_object_change_completed(
memory_object : memory_object_t =
polymorphic|MACH_MSG_TYPE_PORT_SEND_ONCE
- ctype: mach_port_t;
+ ctype: mach_port_t
+#ifdef MEMORY_OBJECT_INTRAN
+ intran: MEMORY_OBJECT_INTRAN
+#endif
+#ifdef MEMORY_OBJECT_INTRAN_PAYLOAD
+ intranpayload: MEMORY_OBJECT_INTRAN_PAYLOAD
+#endif
+#ifdef MEMORY_OBJECT_DESTRUCTOR
+ destructor: MEMORY_OBJECT_DESTRUCTOR
+#endif
+ ;
#if SEQNOS
msgseqno seqno : mach_port_seqno_t;
#endif /* SEQNOS */
diff --git a/include/mach/memory_object_default.defs b/include/mach/memory_object_default.defs
index 0eac2714..cfd54a48 100644
--- a/include/mach/memory_object_default.defs
+++ b/include/mach/memory_object_default.defs
@@ -40,6 +40,10 @@ subsystem
#include <mach/std_types.defs>
#include <mach/mach_types.defs>
+#ifdef MEMORY_OBJECT_IMPORTS
+MEMORY_OBJECT_IMPORTS
+#endif
+
#if SEQNOS
serverprefix seqnos_;
serverdemux seqnos_memory_object_default_server;
diff --git a/include/mach/message.h b/include/mach/message.h
index f78e9780..0a7297e1 100644
--- a/include/mach/message.h
+++ b/include/mach/message.h
@@ -136,7 +136,10 @@ typedef struct {
mach_msg_bits_t msgh_bits;
mach_msg_size_t msgh_size;
mach_port_t msgh_remote_port;
- mach_port_t msgh_local_port;
+ union {
+ mach_port_t msgh_local_port;
+ unsigned long msgh_protected_payload;
+ };
mach_port_seqno_t msgh_seqno;
mach_msg_id_t msgh_id;
} mach_msg_header_t;
@@ -253,7 +256,9 @@ typedef struct {
#define MACH_MSG_TYPE_PORT_SEND MACH_MSG_TYPE_MOVE_SEND
#define MACH_MSG_TYPE_PORT_SEND_ONCE MACH_MSG_TYPE_MOVE_SEND_ONCE
-#define MACH_MSG_TYPE_LAST 22 /* Last assigned */
+#define MACH_MSG_TYPE_PROTECTED_PAYLOAD 23
+
+#define MACH_MSG_TYPE_LAST 23 /* Last assigned */
/*
* A dummy value. Mostly used to indicate that the actual value
diff --git a/include/mach/multiboot.h b/include/mach/multiboot.h
index 3880fa80..b23df4a4 100644
--- a/include/mach/multiboot.h
+++ b/include/mach/multiboot.h
@@ -79,4 +79,4 @@ struct multiboot_info
natural_t pad[4];
};
-#endif _MACH_MULTIBOOT_H_
+#endif /* _MACH_MULTIBOOT_H_ */
diff --git a/include/mach/notify.defs b/include/mach/notify.defs
index e06f6b41..6ba4cde7 100644
--- a/include/mach/notify.defs
+++ b/include/mach/notify.defs
@@ -28,16 +28,33 @@ subsystem notify 64;
#include <mach/std_types.defs>
+#ifdef NOTIFY_IMPORTS
+NOTIFY_IMPORTS
+#endif
+
#if SEQNOS
serverprefix do_seqnos_;
serverdemux seqnos_notify_server;
-#else SEQNOS
+#else
serverprefix do_;
serverdemux notify_server;
-#endif SEQNOS
+#endif
type notify_port_t = MACH_MSG_TYPE_MOVE_SEND_ONCE
- ctype: mach_port_t;
+ ctype: mach_port_t
+#ifdef NOTIFY_INTRAN
+ intran: NOTIFY_INTRAN
+#endif
+#ifdef NOTIFY_INTRAN_PAYLOAD
+ intranpayload: NOTIFY_INTRAN_PAYLOAD
+#endif
+#ifdef NOTIFY_OUTTRAN
+ outtran: NOTIFY_OUTTRAN
+#endif
+#ifdef NOTIFY_DESTRUCTOR
+ destructor: NOTIFY_DESTRUCTOR
+#endif
+;
/* MACH_NOTIFY_FIRST: 0100 */
skip;
@@ -47,7 +64,7 @@ simpleroutine mach_notify_port_deleted(
notify : notify_port_t;
#if SEQNOS
msgseqno seqno : mach_port_seqno_t;
-#endif SEQNOS
+#endif
name : mach_port_name_t);
/* MACH_NOTIFY_MSG_ACCEPTED: 0102 */
@@ -55,7 +72,7 @@ simpleroutine mach_notify_msg_accepted(
notify : notify_port_t;
#if SEQNOS
msgseqno seqno : mach_port_seqno_t;
-#endif SEQNOS
+#endif
name : mach_port_name_t);
skip; /* was NOTIFY_OWNERSHIP_RIGHTS: 0103 */
@@ -67,7 +84,7 @@ simpleroutine mach_notify_port_destroyed(
notify : notify_port_t;
#if SEQNOS
msgseqno seqno : mach_port_seqno_t;
-#endif SEQNOS
+#endif
rights : mach_port_receive_t);
/* MACH_NOTIFY_NO_SENDERS: 0106 */
@@ -75,7 +92,7 @@ simpleroutine mach_notify_no_senders(
notify : notify_port_t;
#if SEQNOS
msgseqno seqno : mach_port_seqno_t;
-#endif SEQNOS
+#endif
mscount : mach_port_mscount_t);
/* MACH_NOTIFY_SEND_ONCE: 0107 */
@@ -83,7 +100,7 @@ simpleroutine mach_notify_send_once(
notify : notify_port_t
#if SEQNOS
; msgseqno seqno : mach_port_seqno_t
-#endif SEQNOS
+#endif
);
/* MACH_NOTIFY_DEAD_NAME: 0110 */
@@ -91,5 +108,5 @@ simpleroutine mach_notify_dead_name(
notify : notify_port_t;
#if SEQNOS
msgseqno seqno : mach_port_seqno_t;
-#endif SEQNOS
+#endif
name : mach_port_name_t);
diff --git a/include/mach/port.h b/include/mach/port.h
index 53f60716..3036a921 100644
--- a/include/mach/port.h
+++ b/include/mach/port.h
@@ -137,6 +137,8 @@ typedef struct mach_port_status {
/*
* Compatibility definitions, for code written
* before there was an mps_seqno field.
+ *
+ * XXX: Remove this before releasing Gnumach 1.6.
*/
typedef struct old_mach_port_status {
diff --git a/include/mach/profil.h b/include/mach/profil.h
index 0eb4ce47..866f267b 100644
--- a/include/mach/profil.h
+++ b/include/mach/profil.h
@@ -137,7 +137,7 @@ extern vm_map_t kernel_map;
/* MACRO set_pbuf_value
**
** enters the value 'val' in the buffer 'pbuf' and returns the following
-** indications: 0: means that a fatal error occured: the buffer was full
+** indications: 0: means that a fatal error occurred: the buffer was full
** (it hasn't been sent yet)
** 1: means that a value has been inserted successfully
** 2: means that we'v just entered the last value causing
diff --git a/include/mach/rpc.h b/include/mach/rpc.h
index d3098f80..36eb5921 100644
--- a/include/mach/rpc.h
+++ b/include/mach/rpc.h
@@ -21,7 +21,6 @@
#include <mach/kern_return.h>
#include <mach/message.h>
-#include <mach/machine/rpc.h>
/*
* Description of a port passed up by the leaky-register RPC path
diff --git a/include/mach/std_types.defs b/include/mach/std_types.defs
index 00d1a698..a1f156d9 100644
--- a/include/mach/std_types.defs
+++ b/include/mach/std_types.defs
@@ -49,7 +49,13 @@ type pointer_t = ^array[] of MACH_MSG_TYPE_BYTE
ctype: vm_offset_t;
-type mach_port_t = MACH_MSG_TYPE_COPY_SEND;
+type mach_port_t = MACH_MSG_TYPE_COPY_SEND
+#ifndef KERNEL_SERVER
+#ifdef MACH_PAYLOAD_TO_PORT
+ intranpayload: mach_port_t MACH_PAYLOAD_TO_PORT
+#endif /* MACH_PAYLOAD_TO_PORT */
+#endif /* KERNEL_SERVER */
+;
type mach_port_array_t = array[] of mach_port_t;
type mach_port_name_t = MACH_MSG_TYPE_PORT_NAME
diff --git a/include/mach/task_notify.defs b/include/mach/task_notify.defs
new file mode 100644
index 00000000..5485d4e3
--- /dev/null
+++ b/include/mach/task_notify.defs
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2014 Free Software Foundation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+subsystem
+#if KERNEL_SERVER
+ KernelServer
+#endif /* KERNEL_SERVER */
+#if KERNEL_USER
+ KernelUser
+#endif /* KERNEL_USER */
+ task_notify 4400;
+
+#include <mach/std_types.defs>
+#include <mach/mach_types.defs>
+
+/* These notifications are sent to the port registered via
+ `register_new_task_notification' and provide a robust parental
+ relation between tasks. */
+simpleroutine mach_notify_new_task(
+ notify : mach_port_t;
+ task : task_t;
+ parent : task_t);
diff --git a/include/mach/time_value.h b/include/mach/time_value.h
index 2a2f0911..3a9c384c 100644
--- a/include/mach/time_value.h
+++ b/include/mach/time_value.h
@@ -45,29 +45,48 @@ typedef struct time_value time_value_t;
*/
#define TIME_MICROS_MAX (1000000)
+#define time_value_assert(val) \
+ assert(0 <= (val)->microseconds && (val)->microseconds < TIME_MICROS_MAX);
+
#define time_value_add_usec(val, micros) { \
+ time_value_assert(val); \
if (((val)->microseconds += (micros)) \
>= TIME_MICROS_MAX) { \
(val)->microseconds -= TIME_MICROS_MAX; \
(val)->seconds++; \
} \
+ time_value_assert(val); \
}
-#define time_value_add(result, addend) { \
- (result)->microseconds += (addend)->microseconds; \
- (result)->seconds += (addend)->seconds; \
- if ((result)->microseconds >= TIME_MICROS_MAX) { \
- (result)->microseconds -= TIME_MICROS_MAX; \
- (result)->seconds++; \
- } \
+#define time_value_sub_usec(val, micros) { \
+ time_value_assert(val); \
+ if (((val)->microseconds -= (micros)) < 0) { \
+ (val)->microseconds += TIME_MICROS_MAX; \
+ (val)->seconds--; \
+ } \
+ time_value_assert(val); \
}
+#define time_value_add(result, addend) { \
+ time_value_assert(addend); \
+ (result)->seconds += (addend)->seconds; \
+ time_value_add_usec(result, (addend)->microseconds); \
+ }
+
+#define time_value_sub(result, subtrahend) { \
+ time_value_assert(subtrahend); \
+ (result)->seconds -= (subtrahend)->seconds; \
+ time_value_sub_usec(result, (subtrahend)->microseconds); \
+ }
+
/*
* Time value available through the mapped-time interface.
* Read this mapped value with
* do {
* secs = mtime->seconds;
+ * __sync_synchronize();
* usecs = mtime->microseconds;
+ * __sync_synchronize();
* } while (secs != mtime->check_seconds);
*/
@@ -77,4 +96,16 @@ typedef struct mapped_time_value {
integer_t check_seconds;
} mapped_time_value_t;
+/* Macros for converting between struct timespec and time_value_t. */
+
+#define TIME_VALUE_TO_TIMESPEC(tv, ts) do { \
+ (ts)->tv_sec = (tv)->seconds; \
+ (ts)->tv_nsec = (tv)->microseconds * 1000; \
+} while(0)
+
+#define TIMESPEC_TO_TIME_VALUE(tv, ts) do { \
+ (tv)->seconds = (ts)->tv_sec; \
+ (tv)->microseconds = (ts)->tv_nsec / 1000; \
+} while(0)
+
#endif /* _MACH_TIME_VALUE_H_ */
diff --git a/include/mach/version.h b/include/mach/version.h
index ec12ea74..3ef78592 100644
--- a/include/mach/version.h
+++ b/include/mach/version.h
@@ -43,6 +43,9 @@
* minor 0.
*/
+#ifndef _MACH_VERSION_H_
+#define _MACH_VERSION_H_
+
#define KERNEL_MAJOR_VERSION 4
#define KERNEL_MINOR_VERSION 0
@@ -66,3 +69,5 @@
* excised from the CSD environment.
*/
#define INCLUDE_VERSION 0
+
+#endif /* _MACH_VERSION_H_ */
diff --git a/include/mach/vm_param.h b/include/mach/vm_param.h
index 03609815..cdccce82 100644
--- a/include/mach/vm_param.h
+++ b/include/mach/vm_param.h
@@ -39,7 +39,7 @@
#include <mach/machine/vm_types.h>
/*
- * The machine independent pages are refered to as PAGES. A page
+ * The machine independent pages are referred to as PAGES. A page
* is some number of hardware pages, depending on the target machine.
*
* All references to the size of a page should be done
diff --git a/include/mach_debug/ipc_info.h b/include/mach_debug/ipc_info.h
index ef0b0c6a..a47ae7b4 100644
--- a/include/mach_debug/ipc_info.h
+++ b/include/mach_debug/ipc_info.h
@@ -43,40 +43,17 @@
* in mach_debug_types.defs when adding/removing fields.
*/
-
-typedef struct ipc_info_space {
- natural_t iis_genno_mask; /* generation number mask */
- natural_t iis_table_size; /* size of table */
- natural_t iis_table_next; /* next possible size of table */
- natural_t iis_tree_size; /* size of tree */
- natural_t iis_tree_small; /* # of small entries in tree */
- natural_t iis_tree_hash; /* # of hashed entries in tree */
-} ipc_info_space_t;
-
-
typedef struct ipc_info_name {
mach_port_t iin_name; /* port name, including gen number */
-/*boolean_t*/integer_t iin_collision; /* collision at this entry? */
-/*boolean_t*/integer_t iin_compat; /* is this a compat-mode entry? */
/*boolean_t*/integer_t iin_marequest; /* extant msg-accepted request? */
mach_port_type_t iin_type; /* straight port type */
mach_port_urefs_t iin_urefs; /* user-references */
vm_offset_t iin_object; /* object pointer */
natural_t iin_next; /* marequest/next in free list */
- natural_t iin_hash; /* hash index */
} ipc_info_name_t;
typedef ipc_info_name_t *ipc_info_name_array_t;
-
-typedef struct ipc_info_tree_name {
- ipc_info_name_t iitn_name;
- mach_port_t iitn_lchild; /* name of left child */
- mach_port_t iitn_rchild; /* name of right child */
-} ipc_info_tree_name_t;
-
-typedef ipc_info_tree_name_t *ipc_info_tree_name_array_t;
-
/*
* Type definitions for mach_port_kernel_object.
* By remarkable coincidence, these closely resemble
diff --git a/include/mach_debug/mach_debug.defs b/include/mach_debug/mach_debug.defs
index 053c3fe6..c8e8b1b4 100644
--- a/include/mach_debug/mach_debug.defs
+++ b/include/mach_debug/mach_debug.defs
@@ -57,14 +57,7 @@ routine mach_port_get_srights(
name : mach_port_name_t;
out srights : mach_port_rights_t);
-/*
- * Returns information about the global reverse hash table.
- */
-
-routine host_ipc_hash_info(
- host : host_t;
- out info : hash_info_bucket_array_t,
- CountInOut, Dealloc);
+skip; /* host_ipc_hash_info */
/*
* Returns information about the marequest hash table.
@@ -76,17 +69,7 @@ routine host_ipc_marequest_info(
out info : hash_info_bucket_array_t,
CountInOut, Dealloc);
-/*
- * Returns information about an IPC space.
- */
-
-routine mach_port_space_info(
- task : ipc_space_t;
- out info : ipc_info_space_t;
- out table_info : ipc_info_name_array_t,
- CountInOut, Dealloc;
- out tree_info : ipc_info_tree_name_array_t,
- CountInOut, Dealloc);
+skip; /* mach_port_space_info */
/*
* Returns information about the dead-name requests
@@ -218,6 +201,12 @@ routine mach_vm_object_pages(
out pages : vm_page_info_array_t,
CountInOut, Dealloc);
+#else /* !defined(MACH_VM_DEBUG) || MACH_VM_DEBUG */
+skip; /* mach_vm_region_info */
+skip; /* mach_vm_object_info */
+skip; /* mach_vm_object_pages */
+#endif /* !defined(MACH_VM_DEBUG) || MACH_VM_DEBUG */
+
/*
* Returns information about the memory allocation caches.
*/
@@ -225,9 +214,3 @@ routine host_slab_info(
host : host_t;
out info : cache_info_array_t,
CountInOut, Dealloc);
-
-#else /* !defined(MACH_VM_DEBUG) || MACH_VM_DEBUG */
-skip; /* mach_vm_region_info */
-skip; /* mach_vm_object_info */
-skip; /* mach_vm_object_pages */
-#endif /* !defined(MACH_VM_DEBUG) || MACH_VM_DEBUG */
diff --git a/include/mach_debug/mach_debug_types.defs b/include/mach_debug/mach_debug_types.defs
index f60125a0..8df2f344 100644
--- a/include/mach_debug/mach_debug_types.defs
+++ b/include/mach_debug/mach_debug_types.defs
@@ -38,14 +38,9 @@ type cache_info_array_t = array[] of cache_info_t;
type hash_info_bucket_t = struct[1] of natural_t;
type hash_info_bucket_array_t = array[] of hash_info_bucket_t;
-type ipc_info_space_t = struct[6] of natural_t;
-
-type ipc_info_name_t = struct[9] of natural_t;
+type ipc_info_name_t = struct[6] of natural_t;
type ipc_info_name_array_t = array[] of ipc_info_name_t;
-type ipc_info_tree_name_t = struct[11] of natural_t;
-type ipc_info_tree_name_array_t = array[] of ipc_info_tree_name_t;
-
type vm_region_info_t = struct[11] of natural_t;
type vm_region_info_array_t = array[] of vm_region_info_t;
@@ -57,6 +52,8 @@ type vm_page_info_array_t = array[] of vm_page_info_t;
type symtab_name_t = (MACH_MSG_TYPE_STRING_C, 8*32);
+type kernel_debug_name_t = c_string[*: 64];
+
import <mach_debug/mach_debug_types.h>;
#endif /* _MACH_DEBUG_MACH_DEBUG_TYPES_DEFS_ */
diff --git a/include/mach_debug/mach_debug_types.h b/include/mach_debug/mach_debug_types.h
index 5d4efcde..9c7d1fde 100644
--- a/include/mach_debug/mach_debug_types.h
+++ b/include/mach_debug/mach_debug_types.h
@@ -37,4 +37,15 @@
typedef char symtab_name_t[32];
+/*
+ * A fixed-length string data type intended for names given to
+ * kernel objects.
+ *
+ * Note that it is not guaranteed that the in-kernel data
+ * structure will hold KERNEL_DEBUG_NAME_MAX bytes. The given
+ * name will be truncated to fit into the target data structure.
+ */
+#define KERNEL_DEBUG_NAME_MAX (64)
+typedef char kernel_debug_name_t[KERNEL_DEBUG_NAME_MAX];
+
#endif /* _MACH_DEBUG_MACH_DEBUG_TYPES_H_ */
diff --git a/include/mach_debug/pc_info.h b/include/mach_debug/pc_info.h
index bc43fa8d..912da9fd 100644
--- a/include/mach_debug/pc_info.h
+++ b/include/mach_debug/pc_info.h
@@ -40,4 +40,4 @@ typedef struct sampled_pc {
typedef sampled_pc_t *sampled_pc_array_t;
typedef unsigned int sampled_pc_seqno_t;
-#endif _MACH_DEBUG_PC_INFO_H_
+#endif /* _MACH_DEBUG_PC_INFO_H_ */
diff --git a/include/mach_debug/slab_info.h b/include/mach_debug/slab_info.h
index 37dcb8c4..7d12cc18 100644
--- a/include/mach_debug/slab_info.h
+++ b/include/mach_debug/slab_info.h
@@ -36,12 +36,6 @@
#define CACHE_NAME_MAX_LEN 32
-#define CACHE_FLAGS_NO_CPU_POOL 0x01
-#define CACHE_FLAGS_SLAB_EXTERNAL 0x02
-#define CACHE_FLAGS_NO_RECLAIM 0x04
-#define CACHE_FLAGS_VERIFY 0x08
-#define CACHE_FLAGS_DIRECT 0x10
-
typedef struct cache_info {
int flags;
size_t cpu_pool_size;
diff --git a/include/string.h b/include/string.h
index c77d387b..c31b4292 100644
--- a/include/string.h
+++ b/include/string.h
@@ -32,7 +32,7 @@ extern void *memcpy (void *dest, const void *src, size_t n);
extern void *memmove (void *dest, const void *src, size_t n);
-extern int memcmp (const void *s1, const void *s2, size_t n);
+extern int memcmp (const void *s1, const void *s2, size_t n) __attribute__ ((pure));
extern void *memset (void *s, int c, size_t n);
@@ -46,11 +46,11 @@ extern char *strrchr (const char *s, int c);
extern char *strsep (char **strp, const char *delim);
-extern int strcmp (const char *s1, const char *s2);
+extern int strcmp (const char *s1, const char *s2) __attribute__ ((pure));
-extern int strncmp (const char *s1, const char *s2, size_t n);
+extern int strncmp (const char *s1, const char *s2, size_t n) __attribute__ ((pure));
-extern size_t strlen (const char *s);
+extern size_t strlen (const char *s) __attribute__ ((pure));
extern char *strstr(const char *haystack, const char *needle);
diff --git a/ipc/ipc_entry.c b/ipc/ipc_entry.c
index 3a062447..0414ba5f 100644
--- a/ipc/ipc_entry.c
+++ b/ipc/ipc_entry.c
@@ -46,158 +46,17 @@
#include <ipc/ipc_types.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_space.h>
-#include <ipc/ipc_splay.h>
-#include <ipc/ipc_hash.h>
#include <ipc/ipc_table.h>
#include <ipc/ipc_object.h>
-struct kmem_cache ipc_tree_entry_cache;
-
-/*
- * Routine: ipc_entry_tree_collision
- * Purpose:
- * Checks if "name" collides with an allocated name
- * in the space's tree. That is, returns TRUE
- * if the splay tree contains a name with the same
- * index as "name".
- * Conditions:
- * The space is locked (read or write) and active.
- */
-
-boolean_t
-ipc_entry_tree_collision(
- ipc_space_t space,
- mach_port_t name)
-{
- mach_port_index_t index;
- mach_port_t lower, upper;
-
- assert(space->is_active);
-
- /*
- * Check if we collide with the next smaller name
- * or the next larger name.
- */
-
- ipc_splay_tree_bounds(&space->is_tree, name, &lower, &upper);
-
- index = MACH_PORT_INDEX(name);
- return (((lower != ~0) && (MACH_PORT_INDEX(lower) == index)) ||
- ((upper != 0) && (MACH_PORT_INDEX(upper) == index)));
-}
-
-/*
- * Routine: ipc_entry_lookup
- * Purpose:
- * Searches for an entry, given its name.
- * Conditions:
- * The space must be read or write locked throughout.
- * The space must be active.
- */
-
-ipc_entry_t
-ipc_entry_lookup(space, name)
- ipc_space_t space;
- mach_port_t name;
-{
- mach_port_index_t index;
- ipc_entry_t entry;
-
- assert(space->is_active);
-
- index = MACH_PORT_INDEX(name);
- if (index < space->is_table_size) {
- entry = &space->is_table[index];
- if (IE_BITS_GEN(entry->ie_bits) != MACH_PORT_GEN(name))
- if (entry->ie_bits & IE_BITS_COLLISION) {
- assert(space->is_tree_total > 0);
- goto tree_lookup;
- } else
- entry = IE_NULL;
- else if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
- entry = IE_NULL;
- } else if (space->is_tree_total == 0)
- entry = IE_NULL;
- else
- tree_lookup:
- entry = (ipc_entry_t)
- ipc_splay_tree_lookup(&space->is_tree, name);
-
- assert((entry == IE_NULL) || IE_BITS_TYPE(entry->ie_bits));
- return entry;
-}
-
-/*
- * Routine: ipc_entry_get
- * Purpose:
- * Tries to allocate an entry out of the space.
- * Conditions:
- * The space is write-locked and active throughout.
- * An object may be locked. Will not allocate memory.
- * Returns:
- * KERN_SUCCESS A free entry was found.
- * KERN_NO_SPACE No entry allocated.
- */
-
-kern_return_t
-ipc_entry_get(space, namep, entryp)
- ipc_space_t space;
- mach_port_t *namep;
- ipc_entry_t *entryp;
-{
- ipc_entry_t table;
- mach_port_index_t first_free;
- mach_port_t new_name;
- ipc_entry_t free_entry;
-
- assert(space->is_active);
-
- table = space->is_table;
- first_free = table->ie_next;
-
- if (first_free == 0)
- return KERN_NO_SPACE;
-
- free_entry = &table[first_free];
- table->ie_next = free_entry->ie_next;
-
- /*
- * Initialize the new entry. We need only
- * increment the generation number and clear ie_request.
- */
-
- {
- mach_port_gen_t gen;
-
- assert((free_entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
- gen = free_entry->ie_bits + IE_BITS_GEN_ONE;
- free_entry->ie_bits = gen;
- free_entry->ie_request = 0;
- new_name = MACH_PORT_MAKE(first_free, gen);
- }
-
- /*
- * The new name can't be MACH_PORT_NULL because index
- * is non-zero. It can't be MACH_PORT_DEAD because
- * the table isn't allowed to grow big enough.
- * (See comment in ipc/ipc_table.h.)
- */
-
- assert(MACH_PORT_VALID(new_name));
- assert(free_entry->ie_object == IO_NULL);
-
- *namep = new_name;
- *entryp = free_entry;
- return KERN_SUCCESS;
-}
+struct kmem_cache ipc_entry_cache;
/*
* Routine: ipc_entry_alloc
* Purpose:
* Allocate an entry out of the space.
* Conditions:
- * The space is not locked before, but it is write-locked after
- * if the call is successful. May allocate memory.
+ * The space must be write-locked. May allocate memory.
* Returns:
* KERN_SUCCESS An entry was allocated.
* KERN_INVALID_TASK The space is dead.
@@ -212,23 +71,37 @@ ipc_entry_alloc(
ipc_entry_t *entryp)
{
kern_return_t kr;
+ ipc_entry_t entry;
+ rdxtree_key_t key;
- is_write_lock(space);
+ if (!space->is_active) {
+ return KERN_INVALID_TASK;
+ }
- for (;;) {
- if (!space->is_active) {
- is_write_unlock(space);
- return KERN_INVALID_TASK;
- }
+ kr = ipc_entry_get(space, namep, entryp);
+ if (kr == KERN_SUCCESS)
+ return kr;
- kr = ipc_entry_get(space, namep, entryp);
- if (kr == KERN_SUCCESS)
- return kr;
+ entry = ie_alloc();
+ if (entry == IE_NULL) {
+ return KERN_RESOURCE_SHORTAGE;
+ }
- kr = ipc_entry_grow_table(space);
- if (kr != KERN_SUCCESS)
- return kr; /* space is unlocked */
+ kr = rdxtree_insert_alloc(&space->is_map, entry, &key);
+ if (kr) {
+ ie_free(entry);
+ return kr;
}
+ space->is_size += 1;
+
+ entry->ie_bits = 0;
+ entry->ie_object = IO_NULL;
+ entry->ie_request = 0;
+ entry->ie_name = (mach_port_t) key;
+
+ *entryp = entry;
+ *namep = (mach_port_t) key;
+ return KERN_SUCCESS;
}
/*
@@ -237,8 +110,7 @@ ipc_entry_alloc(
* Allocates/finds an entry with a specific name.
* If an existing entry is returned, its type will be nonzero.
* Conditions:
- * The space is not locked before, but it is write-locked after
- * if the call is successful. May allocate memory.
+ * The space must be write-locked. May allocate memory.
* Returns:
* KERN_SUCCESS Found existing entry with same name.
* KERN_SUCCESS Allocated a new entry.
@@ -252,601 +124,80 @@ ipc_entry_alloc_name(
mach_port_t name,
ipc_entry_t *entryp)
{
- mach_port_index_t index = MACH_PORT_INDEX(name);
- mach_port_gen_t gen = MACH_PORT_GEN(name);
- ipc_tree_entry_t tree_entry = ITE_NULL;
-
+ kern_return_t kr;
+ ipc_entry_t entry, e, *prevp;
+ void **slot;
assert(MACH_PORT_VALID(name));
+ if (!space->is_active) {
+ return KERN_INVALID_TASK;
+ }
- is_write_lock(space);
-
- for (;;) {
- ipc_entry_t entry;
- ipc_tree_entry_t tentry;
- ipc_table_size_t its;
-
- if (!space->is_active) {
- is_write_unlock(space);
- if (tree_entry) ite_free(tree_entry);
- return KERN_INVALID_TASK;
- }
-
- /*
- * If we are under the table cutoff,
- * there are three cases:
- * 1) The entry is inuse, for the same name
- * 2) The entry is inuse, for a different name
- * 3) The entry is free
- */
-
- if ((0 < index) && (index < space->is_table_size)) {
- ipc_entry_t table = space->is_table;
-
- entry = &table[index];
-
- if (IE_BITS_TYPE(entry->ie_bits)) {
- if (IE_BITS_GEN(entry->ie_bits) == gen) {
- *entryp = entry;
- if (tree_entry) ite_free(tree_entry);
- return KERN_SUCCESS;
- }
- } else {
- mach_port_index_t free_index, next_index;
-
- /*
- * Rip the entry out of the free list.
- */
-
- for (free_index = 0;
- (next_index = table[free_index].ie_next)
- != index;
- free_index = next_index)
- continue;
-
- table[free_index].ie_next =
- table[next_index].ie_next;
-
- entry->ie_bits = gen;
- assert(entry->ie_object == IO_NULL);
- entry->ie_request = 0;
-
- *entryp = entry;
- if (tree_entry) ite_free(tree_entry);
- return KERN_SUCCESS;
- }
- }
-
- /*
- * Before trying to allocate any memory,
- * check if the entry already exists in the tree.
- * This avoids spurious resource errors.
- * The splay tree makes a subsequent lookup/insert
- * of the same name cheap, so this costs little.
- */
-
- if ((space->is_tree_total > 0) &&
- ((tentry = ipc_splay_tree_lookup(&space->is_tree, name))
- != ITE_NULL)) {
- assert(tentry->ite_space == space);
- assert(IE_BITS_TYPE(tentry->ite_bits));
+ slot = rdxtree_lookup_slot(&space->is_map, (rdxtree_key_t) name);
+ if (slot != NULL)
+ entry = *(ipc_entry_t *) slot;
- *entryp = &tentry->ite_entry;
- if (tree_entry) ite_free(tree_entry);
- return KERN_SUCCESS;
+ if (slot == NULL || entry == IE_NULL) {
+ entry = ie_alloc();
+ if (entry == IE_NULL) {
+ return KERN_RESOURCE_SHORTAGE;
}
- its = space->is_table_next;
-
- /*
- * Check if the table should be grown.
- *
- * Note that if space->is_table_size == its->its_size,
- * then we won't ever try to grow the table.
- *
- * Note that we are optimistically assuming that name
- * doesn't collide with any existing names. (So if
- * it were entered into the tree, is_tree_small would
- * be incremented.) This is OK, because even in that
- * case, we don't lose memory by growing the table.
- */
+ entry->ie_bits = 0;
+ entry->ie_object = IO_NULL;
+ entry->ie_request = 0;
+ entry->ie_name = name;
- if ((space->is_table_size <= index) &&
- (index < its->its_size) &&
- (((its->its_size - space->is_table_size) *
- sizeof(struct ipc_entry)) <
- ((space->is_tree_small + 1) *
- sizeof(struct ipc_tree_entry)))) {
- kern_return_t kr;
-
- /*
- * Can save space by growing the table.
- * Because the space will be unlocked,
- * we must restart.
- */
-
- kr = ipc_entry_grow_table(space);
- assert(kr != KERN_NO_SPACE);
+ if (slot != NULL)
+ rdxtree_replace_slot(slot, entry);
+ else {
+ kr = rdxtree_insert(&space->is_map,
+ (rdxtree_key_t) name, entry);
if (kr != KERN_SUCCESS) {
- /* space is unlocked */
- if (tree_entry) ite_free(tree_entry);
+ ie_free(entry);
return kr;
}
-
- continue;
- }
-
- /*
- * If a splay-tree entry was allocated previously,
- * go ahead and insert it into the tree.
- */
-
- if (tree_entry != ITE_NULL) {
- space->is_tree_total++;
-
- if (index < space->is_table_size)
- space->is_table[index].ie_bits |=
- IE_BITS_COLLISION;
- else if ((index < its->its_size) &&
- !ipc_entry_tree_collision(space, name))
- space->is_tree_small++;
-
- ipc_splay_tree_insert(&space->is_tree,
- name, tree_entry);
-
- tree_entry->ite_bits = 0;
- tree_entry->ite_object = IO_NULL;
- tree_entry->ite_request = 0;
- tree_entry->ite_space = space;
- *entryp = &tree_entry->ite_entry;
- return KERN_SUCCESS;
}
+ space->is_size += 1;
- /*
- * Allocate a tree entry and try again.
- */
-
- is_write_unlock(space);
- tree_entry = ite_alloc();
- if (tree_entry == ITE_NULL)
- return KERN_RESOURCE_SHORTAGE;
- is_write_lock(space);
+ *entryp = entry;
+ return KERN_SUCCESS;
}
-}
-
-/*
- * Routine: ipc_entry_dealloc
- * Purpose:
- * Deallocates an entry from a space.
- * Conditions:
- * The space must be write-locked throughout.
- * The space must be active.
- */
-
-void
-ipc_entry_dealloc(
- ipc_space_t space,
- mach_port_t name,
- ipc_entry_t entry)
-{
- ipc_entry_t table;
- ipc_entry_num_t size;
- mach_port_index_t index;
- assert(space->is_active);
- assert(entry->ie_object == IO_NULL);
- assert(entry->ie_request == 0);
-
- index = MACH_PORT_INDEX(name);
- table = space->is_table;
- size = space->is_table_size;
-
- if ((index < size) && (entry == &table[index])) {
- assert(IE_BITS_GEN(entry->ie_bits) == MACH_PORT_GEN(name));
-
- if (entry->ie_bits & IE_BITS_COLLISION) {
- struct ipc_splay_tree small, collisions;
- ipc_tree_entry_t tentry;
- mach_port_t tname;
- boolean_t pick;
- ipc_entry_bits_t bits;
- ipc_object_t obj;
-
- /* must move an entry from tree to table */
-
- ipc_splay_tree_split(&space->is_tree,
- MACH_PORT_MAKE(index+1, 0),
- &collisions);
- ipc_splay_tree_split(&collisions,
- MACH_PORT_MAKE(index, 0),
- &small);
-
- pick = ipc_splay_tree_pick(&collisions,
- &tname, &tentry);
- assert(pick);
- assert(MACH_PORT_INDEX(tname) == index);
-
- bits = tentry->ite_bits;
- entry->ie_bits = bits | MACH_PORT_GEN(tname);
- entry->ie_object = obj = tentry->ite_object;
- entry->ie_request = tentry->ite_request;
- assert(tentry->ite_space == space);
-
- if (IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND) {
- ipc_hash_global_delete(space, obj,
- tname, tentry);
- ipc_hash_local_insert(space, obj,
- index, entry);
- }
-
- ipc_splay_tree_delete(&collisions, tname, tentry);
-
- assert(space->is_tree_total > 0);
- space->is_tree_total--;
-
- /* check if collision bit should still be on */
-
- pick = ipc_splay_tree_pick(&collisions,
- &tname, &tentry);
- if (pick) {
- entry->ie_bits |= IE_BITS_COLLISION;
- ipc_splay_tree_join(&space->is_tree,
- &collisions);
- }
-
- ipc_splay_tree_join(&space->is_tree, &small);
- } else {
- entry->ie_bits &= IE_BITS_GEN_MASK;
- entry->ie_next = table->ie_next;
- table->ie_next = index;
- }
- } else {
- ipc_tree_entry_t tentry = (ipc_tree_entry_t) entry;
-
- assert(tentry->ite_space == space);
-
- ipc_splay_tree_delete(&space->is_tree, name, tentry);
-
- assert(space->is_tree_total > 0);
- space->is_tree_total--;
-
- if (index < size) {
- ipc_entry_t ientry = &table[index];
-
- assert(ientry->ie_bits & IE_BITS_COLLISION);
-
- if (!ipc_entry_tree_collision(space, name))
- ientry->ie_bits &= ~IE_BITS_COLLISION;
- } else if ((index < space->is_table_next->its_size) &&
- !ipc_entry_tree_collision(space, name)) {
- assert(space->is_tree_small > 0);
- space->is_tree_small--;
- }
+ if (IE_BITS_TYPE(entry->ie_bits)) {
+ /* Used entry. */
+ *entryp = entry;
+ return KERN_SUCCESS;
}
-}
-/*
- * Routine: ipc_entry_grow_table
- * Purpose:
- * Grows the table in a space.
- * Conditions:
- * The space must be write-locked and active before.
- * If successful, it is also returned locked.
- * Allocates memory.
- * Returns:
- * KERN_SUCCESS Grew the table.
- * KERN_SUCCESS Somebody else grew the table.
- * KERN_SUCCESS The space died.
- * KERN_NO_SPACE Table has maximum size already.
- * KERN_RESOURCE_SHORTAGE Couldn't allocate a new table.
- */
-
-kern_return_t
-ipc_entry_grow_table(space)
- ipc_space_t space;
-{
- ipc_entry_num_t osize, size, nsize;
-
- do {
- ipc_entry_t otable, table;
- ipc_table_size_t oits, its, nits;
- mach_port_index_t i, free_index;
-
- assert(space->is_active);
-
- if (space->is_growing) {
- /*
- * Somebody else is growing the table.
- * We just wait for them to finish.
- */
-
- assert_wait((event_t) space, FALSE);
- is_write_unlock(space);
- thread_block((void (*)()) 0);
- is_write_lock(space);
- return KERN_SUCCESS;
- }
-
- otable = space->is_table;
- its = space->is_table_next;
- size = its->its_size;
- oits = its - 1;
- osize = oits->its_size;
- nits = its + 1;
- nsize = nits->its_size;
-
- if (osize == size) {
- is_write_unlock(space);
- printf_once("no more room for ipc_entry_grow_table in space %p\n", space);
- return KERN_NO_SPACE;
- }
-
- assert((osize < size) && (size <= nsize));
-
- /*
- * OK, we'll attempt to grow the table.
- * The realloc requires that the old table
- * remain in existence.
- */
+ /* Free entry. Rip the entry out of the free list. */
+ for (prevp = &space->is_free_list, e = space->is_free_list;
+ e != entry;
+ ({ prevp = &e->ie_next_free; e = e->ie_next_free; }))
+ continue;
- space->is_growing = TRUE;
- is_write_unlock(space);
- if (it_entries_reallocable(oits))
- table = it_entries_realloc(oits, otable, its);
- else
- table = it_entries_alloc(its);
- is_write_lock(space);
- space->is_growing = FALSE;
+ *prevp = entry->ie_next_free;
+ space->is_free_list_size -= 1;
- /*
- * We need to do a wakeup on the space,
- * to rouse waiting threads. We defer
- * this until the space is unlocked,
- * because we don't want them to spin.
- */
-
- if (table == IE_NULL) {
- is_write_unlock(space);
- thread_wakeup((event_t) space);
- return KERN_RESOURCE_SHORTAGE;
- }
-
- if (!space->is_active) {
- /*
- * The space died while it was unlocked.
- */
-
- is_write_unlock(space);
- thread_wakeup((event_t) space);
- it_entries_free(its, table);
- is_write_lock(space);
- return KERN_SUCCESS;
- }
-
- assert(space->is_table == otable);
- assert(space->is_table_next == its);
- assert(space->is_table_size == osize);
-
- space->is_table = table;
- space->is_table_size = size;
- space->is_table_next = nits;
-
- /*
- * If we did a realloc, it remapped the data.
- * Otherwise we copy by hand first. Then we have
- * to clear the index fields in the old part and
- * zero the new part.
- */
-
- if (!it_entries_reallocable(oits))
- memcpy(table, otable,
- osize * sizeof(struct ipc_entry));
-
- for (i = 0; i < osize; i++)
- table[i].ie_index = 0;
-
- (void) memset((void *) (table + osize), 0,
- (size - osize) * sizeof(struct ipc_entry));
-
- /*
- * Put old entries into the reverse hash table.
- */
-
- for (i = 0; i < osize; i++) {
- ipc_entry_t entry = &table[i];
-
- if (IE_BITS_TYPE(entry->ie_bits) ==
- MACH_PORT_TYPE_SEND)
- ipc_hash_local_insert(space, entry->ie_object,
- i, entry);
- }
-
- /*
- * If there are entries in the splay tree,
- * then we have work to do:
- * 1) transfer entries to the table
- * 2) update is_tree_small
- */
-
- if (space->is_tree_total > 0) {
- mach_port_index_t index;
- boolean_t delete;
- struct ipc_splay_tree ignore;
- struct ipc_splay_tree move;
- struct ipc_splay_tree small;
- ipc_entry_num_t nosmall;
- ipc_tree_entry_t tentry;
-
- /*
- * The splay tree divides into four regions,
- * based on the index of the entries:
- * 1) 0 <= index < osize
- * 2) osize <= index < size
- * 3) size <= index < nsize
- * 4) nsize <= index
- *
- * Entries in the first part are ignored.
- * Entries in the second part, that don't
- * collide, are moved into the table.
- * Entries in the third part, that don't
- * collide, are counted for is_tree_small.
- * Entries in the fourth part are ignored.
- */
-
- ipc_splay_tree_split(&space->is_tree,
- MACH_PORT_MAKE(nsize, 0),
- &small);
- ipc_splay_tree_split(&small,
- MACH_PORT_MAKE(size, 0),
- &move);
- ipc_splay_tree_split(&move,
- MACH_PORT_MAKE(osize, 0),
- &ignore);
-
- /* move entries into the table */
-
- for (tentry = ipc_splay_traverse_start(&move);
- tentry != ITE_NULL;
- tentry = ipc_splay_traverse_next(&move, delete)) {
- mach_port_t name;
- mach_port_gen_t gen;
- mach_port_type_t type;
- ipc_entry_bits_t bits;
- ipc_object_t obj;
- ipc_entry_t entry;
-
- name = tentry->ite_name;
- gen = MACH_PORT_GEN(name);
- index = MACH_PORT_INDEX(name);
-
- assert(tentry->ite_space == space);
- assert((osize <= index) && (index < size));
-
- entry = &table[index];
-
- /* collision with previously moved entry? */
-
- bits = entry->ie_bits;
- if (bits != 0) {
- assert(IE_BITS_TYPE(bits));
- assert(IE_BITS_GEN(bits) != gen);
-
- entry->ie_bits =
- bits | IE_BITS_COLLISION;
- delete = FALSE;
- continue;
- }
-
- bits = tentry->ite_bits;
- type = IE_BITS_TYPE(bits);
- assert(type != MACH_PORT_TYPE_NONE);
-
- entry->ie_bits = bits | gen;
- entry->ie_object = obj = tentry->ite_object;
- entry->ie_request = tentry->ite_request;
-
- if (type == MACH_PORT_TYPE_SEND) {
- ipc_hash_global_delete(space, obj,
- name, tentry);
- ipc_hash_local_insert(space, obj,
- index, entry);
- }
-
- space->is_tree_total--;
- delete = TRUE;
- }
- ipc_splay_traverse_finish(&move);
-
- /* count entries for is_tree_small */
-
- nosmall = 0; index = 0;
- for (tentry = ipc_splay_traverse_start(&small);
- tentry != ITE_NULL;
- tentry = ipc_splay_traverse_next(&small, FALSE)) {
- mach_port_index_t nindex;
-
- nindex = MACH_PORT_INDEX(tentry->ite_name);
-
- if (nindex != index) {
- nosmall++;
- index = nindex;
- }
- }
- ipc_splay_traverse_finish(&small);
-
- assert(nosmall <= (nsize - size));
- assert(nosmall <= space->is_tree_total);
- space->is_tree_small = nosmall;
-
- /* put the splay tree back together */
-
- ipc_splay_tree_join(&space->is_tree, &small);
- ipc_splay_tree_join(&space->is_tree, &move);
- ipc_splay_tree_join(&space->is_tree, &ignore);
- }
-
- /*
- * Add entries in the new part which still aren't used
- * to the free list. Add them in reverse order,
- * and set the generation number to -1, so that
- * early allocations produce "natural" names.
- */
-
- free_index = table[0].ie_next;
- for (i = size-1; i >= osize; --i) {
- ipc_entry_t entry = &table[i];
-
- if (entry->ie_bits == 0) {
- entry->ie_bits = IE_BITS_GEN_MASK;
- entry->ie_next = free_index;
- free_index = i;
- }
- }
- table[0].ie_next = free_index;
-
- /*
- * Now we need to free the old table.
- * If the space dies or grows while unlocked,
- * then we can quit here.
- */
-
- is_write_unlock(space);
- thread_wakeup((event_t) space);
- it_entries_free(oits, otable);
- is_write_lock(space);
- if (!space->is_active || (space->is_table_next != nits))
- return KERN_SUCCESS;
-
- /*
- * We might have moved enough entries from
- * the splay tree into the table that
- * the table can be profitably grown again.
- *
- * Note that if size == nsize, then
- * space->is_tree_small == 0.
- */
- } while ((space->is_tree_small > 0) &&
- (((nsize - size) * sizeof(struct ipc_entry)) <
- (space->is_tree_small * sizeof(struct ipc_tree_entry))));
+ entry->ie_bits = 0;
+ assert(entry->ie_object == IO_NULL);
+ assert(entry->ie_name == name);
+ entry->ie_request = 0;
+ space->is_size += 1;
+ *entryp = entry;
return KERN_SUCCESS;
}
-
#if MACH_KDB
#include <ddb/db_output.h>
#include <kern/task.h>
#define printf kdbprintf
-ipc_entry_t db_ipc_object_by_name(
- task_t task,
- mach_port_t name);
-
-
ipc_entry_t
db_ipc_object_by_name(
- task_t task,
+ const task_t task,
mach_port_t name)
{
ipc_space_t space = task->itk_space;
diff --git a/ipc/ipc_entry.h b/ipc/ipc_entry.h
index 6afa4f68..b429984b 100644
--- a/ipc/ipc_entry.h
+++ b/ipc/ipc_entry.h
@@ -48,47 +48,27 @@
/*
* Spaces hold capabilities for ipc_object_t's (ports and port sets).
- * Each ipc_entry_t records a capability. Most capabilities have
- * small names, and the entries are elements of a table.
- * Capabilities can have large names, and a splay tree holds
- * those entries. The cutoff point between the table and the tree
- * is adjusted dynamically to minimize memory consumption.
- *
- * The ie_index field of entries in the table implements
- * a ordered hash table with open addressing and linear probing.
- * This hash table converts (space, object) -> name.
- * It is used independently of the other fields.
- *
- * Free (unallocated) entries in the table have null ie_object
- * fields. The ie_bits field is zero except for IE_BITS_GEN.
- * The ie_next (ie_request) field links free entries into a free list.
- *
- * The first entry in the table (index 0) is always free.
- * It is used as the head of the free list.
+ * Each ipc_entry_t records a capability.
*/
typedef unsigned int ipc_entry_bits_t;
typedef ipc_table_elems_t ipc_entry_num_t; /* number of entries */
typedef struct ipc_entry {
+ mach_port_t ie_name;
ipc_entry_bits_t ie_bits;
struct ipc_object *ie_object;
union {
- mach_port_index_t next;
+ struct ipc_entry *next_free;
/*XXX ipc_port_request_index_t request;*/
unsigned int request;
} index;
- union {
- mach_port_index_t table;
- struct ipc_tree_entry *tree;
- } hash;
} *ipc_entry_t;
#define IE_NULL ((ipc_entry_t) 0)
#define ie_request index.request
-#define ie_next index.next
-#define ie_index hash.table
+#define ie_next_free index.next_free
#define IE_BITS_UREFS_MASK 0x0000ffff /* 16 bits of user-reference */
#define IE_BITS_UREFS(bits) ((bits) & IE_BITS_UREFS_MASK)
@@ -98,12 +78,10 @@ typedef struct ipc_entry {
#define IE_BITS_MAREQUEST 0x00200000 /* 1 bit for msg-accepted */
-#define IE_BITS_COMPAT 0x00400000 /* 1 bit for compatibility */
-
-#define IE_BITS_COLLISION 0x00800000 /* 1 bit for collisions */
-#define IE_BITS_RIGHT_MASK 0x007fffff /* relevant to the right */
+#define IE_BITS_RIGHT_MASK 0x003fffff /* relevant to the right */
#if PORT_GENERATIONS
+#error "not supported"
#define IE_BITS_GEN_MASK 0xff000000U /* 8 bits for generation */
#define IE_BITS_GEN(bits) ((bits) & IE_BITS_GEN_MASK)
#define IE_BITS_GEN_ONE 0x01000000 /* low bit of generation */
@@ -114,32 +92,9 @@ typedef struct ipc_entry {
#endif
-typedef struct ipc_tree_entry {
- struct ipc_entry ite_entry;
- mach_port_t ite_name;
- struct ipc_space *ite_space;
- struct ipc_tree_entry *ite_lchild;
- struct ipc_tree_entry *ite_rchild;
-} *ipc_tree_entry_t;
-
-#define ITE_NULL ((ipc_tree_entry_t) 0)
-
-#define ite_bits ite_entry.ie_bits
-#define ite_object ite_entry.ie_object
-#define ite_request ite_entry.ie_request
-#define ite_next ite_entry.hash.tree
-
-extern struct kmem_cache ipc_tree_entry_cache;
-
-#define ite_alloc() ((ipc_tree_entry_t) kmem_cache_alloc(&ipc_tree_entry_cache))
-#define ite_free(ite) kmem_cache_free(&ipc_tree_entry_cache, (vm_offset_t) (ite))
-
-
-extern ipc_entry_t
-ipc_entry_lookup(ipc_space_t space, mach_port_t name);
-
-extern kern_return_t
-ipc_entry_get(ipc_space_t space, mach_port_t *namep, ipc_entry_t *entryp);
+extern struct kmem_cache ipc_entry_cache;
+#define ie_alloc() ((ipc_entry_t) kmem_cache_alloc(&ipc_entry_cache))
+#define ie_free(e) kmem_cache_free(&ipc_entry_cache, (vm_offset_t) (e))
extern kern_return_t
ipc_entry_alloc(ipc_space_t space, mach_port_t *namep, ipc_entry_t *entryp);
@@ -147,10 +102,9 @@ ipc_entry_alloc(ipc_space_t space, mach_port_t *namep, ipc_entry_t *entryp);
extern kern_return_t
ipc_entry_alloc_name(ipc_space_t space, mach_port_t name, ipc_entry_t *entryp);
-extern void
-ipc_entry_dealloc(ipc_space_t space, mach_port_t name, ipc_entry_t entry);
-
-extern kern_return_t
-ipc_entry_grow_table(ipc_space_t space);
+ipc_entry_t
+db_ipc_object_by_name(
+ task_t task,
+ mach_port_t name);
#endif /* _IPC_IPC_ENTRY_H_ */
diff --git a/ipc/ipc_hash.c b/ipc/ipc_hash.c
deleted file mode 100644
index 5eec58cb..00000000
--- a/ipc/ipc_hash.c
+++ /dev/null
@@ -1,620 +0,0 @@
-/*
- * Mach Operating System
- * Copyright (c) 1991,1990,1989 Carnegie Mellon University
- * All Rights Reserved.
- *
- * Permission to use, copy, modify and distribute this software and its
- * documentation is hereby granted, provided that both the copyright
- * notice and this permission notice appear in all copies of the
- * software, derivative works or modified versions, and any portions
- * thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
- * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
- * Carnegie Mellon requests users of this software to return to
- *
- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
- * School of Computer Science
- * Carnegie Mellon University
- * Pittsburgh PA 15213-3890
- *
- * any improvements or extensions that they make and grant Carnegie Mellon
- * the rights to redistribute these changes.
- */
-/*
- * File: ipc/ipc_hash.c
- * Author: Rich Draves
- * Date: 1989
- *
- * Entry hash table operations.
- */
-
-#include <kern/printf.h>
-#include <mach/boolean.h>
-#include <mach/port.h>
-#include <kern/lock.h>
-#include <kern/kalloc.h>
-#include <ipc/port.h>
-#include <ipc/ipc_space.h>
-#include <ipc/ipc_object.h>
-#include <ipc/ipc_entry.h>
-#include <ipc/ipc_hash.h>
-#include <ipc/ipc_init.h>
-#include <ipc/ipc_types.h>
-
-#if MACH_IPC_DEBUG
-#include <mach/kern_return.h>
-#include <mach_debug/hash_info.h>
-#include <vm/vm_map.h>
-#include <vm/vm_kern.h>
-#include <vm/vm_user.h>
-#endif
-
-
-
-/*
- * Routine: ipc_hash_lookup
- * Purpose:
- * Converts (space, obj) -> (name, entry).
- * Returns TRUE if an entry was found.
- * Conditions:
- * The space must be locked (read or write) throughout.
- */
-
-boolean_t
-ipc_hash_lookup(space, obj, namep, entryp)
- ipc_space_t space;
- ipc_object_t obj;
- mach_port_t *namep;
- ipc_entry_t *entryp;
-{
- return (ipc_hash_local_lookup(space, obj, namep, entryp) ||
- ((space->is_tree_hash > 0) &&
- ipc_hash_global_lookup(space, obj, namep,
- (ipc_tree_entry_t *) entryp)));
-}
-
-/*
- * Routine: ipc_hash_insert
- * Purpose:
- * Inserts an entry into the appropriate reverse hash table,
- * so that ipc_hash_lookup will find it.
- * Conditions:
- * The space must be write-locked.
- */
-
-void
-ipc_hash_insert(
- ipc_space_t space,
- ipc_object_t obj,
- mach_port_t name,
- ipc_entry_t entry)
-{
- mach_port_index_t index;
-
- index = MACH_PORT_INDEX(name);
- if ((index < space->is_table_size) &&
- (entry == &space->is_table[index]))
- ipc_hash_local_insert(space, obj, index, entry);
- else
- ipc_hash_global_insert(space, obj, name,
- (ipc_tree_entry_t) entry);
-}
-
-/*
- * Routine: ipc_hash_delete
- * Purpose:
- * Deletes an entry from the appropriate reverse hash table.
- * Conditions:
- * The space must be write-locked.
- */
-
-void
-ipc_hash_delete(
- ipc_space_t space,
- ipc_object_t obj,
- mach_port_t name,
- ipc_entry_t entry)
-{
- mach_port_index_t index;
-
- index = MACH_PORT_INDEX(name);
- if ((index < space->is_table_size) &&
- (entry == &space->is_table[index]))
- ipc_hash_local_delete(space, obj, index, entry);
- else
- ipc_hash_global_delete(space, obj, name,
- (ipc_tree_entry_t) entry);
-}
-
-/*
- * The global reverse hash table holds splay tree entries.
- * It is a simple open-chaining hash table with singly-linked buckets.
- * Each bucket is locked separately, with an exclusive lock.
- * Within each bucket, move-to-front is used.
- */
-
-ipc_hash_index_t ipc_hash_global_size;
-ipc_hash_index_t ipc_hash_global_mask;
-
-#define IH_GLOBAL_HASH(space, obj) \
- (((((ipc_hash_index_t) ((vm_offset_t)space)) >> 4) + \
- (((ipc_hash_index_t) ((vm_offset_t)obj)) >> 6)) & \
- ipc_hash_global_mask)
-
-typedef struct ipc_hash_global_bucket {
- decl_simple_lock_data(, ihgb_lock_data)
- ipc_tree_entry_t ihgb_head;
-} *ipc_hash_global_bucket_t;
-
-#define IHGB_NULL ((ipc_hash_global_bucket_t) 0)
-
-#define ihgb_lock_init(ihgb) simple_lock_init(&(ihgb)->ihgb_lock_data)
-#define ihgb_lock(ihgb) simple_lock(&(ihgb)->ihgb_lock_data)
-#define ihgb_unlock(ihgb) simple_unlock(&(ihgb)->ihgb_lock_data)
-
-ipc_hash_global_bucket_t ipc_hash_global_table;
-
-/*
- * Routine: ipc_hash_global_lookup
- * Purpose:
- * Converts (space, obj) -> (name, entry).
- * Looks in the global table, for splay tree entries.
- * Returns TRUE if an entry was found.
- * Conditions:
- * The space must be locked (read or write) throughout.
- */
-
-boolean_t
-ipc_hash_global_lookup(
- ipc_space_t space,
- ipc_object_t obj,
- mach_port_t *namep,
- ipc_tree_entry_t *entryp)
-{
- ipc_hash_global_bucket_t bucket;
- ipc_tree_entry_t this, *last;
-
- assert(space != IS_NULL);
- assert(obj != IO_NULL);
-
- bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
- ihgb_lock(bucket);
-
- if ((this = bucket->ihgb_head) != ITE_NULL) {
- if ((this->ite_object == obj) &&
- (this->ite_space == space)) {
- /* found it at front; no need to move */
-
- *namep = this->ite_name;
- *entryp = this;
- } else for (last = &this->ite_next;
- (this = *last) != ITE_NULL;
- last = &this->ite_next) {
- if ((this->ite_object == obj) &&
- (this->ite_space == space)) {
- /* found it; move to front */
-
- *last = this->ite_next;
- this->ite_next = bucket->ihgb_head;
- bucket->ihgb_head = this;
-
- *namep = this->ite_name;
- *entryp = this;
- break;
- }
- }
- }
-
- ihgb_unlock(bucket);
- return this != ITE_NULL;
-}
-
-/*
- * Routine: ipc_hash_global_insert
- * Purpose:
- * Inserts an entry into the global reverse hash table.
- * Conditions:
- * The space must be write-locked.
- */
-
-void
-ipc_hash_global_insert(
- ipc_space_t space,
- ipc_object_t obj,
- mach_port_t name,
- ipc_tree_entry_t entry)
-{
- ipc_hash_global_bucket_t bucket;
-
-
- assert(entry->ite_name == name);
- assert(space != IS_NULL);
- assert(entry->ite_space == space);
- assert(obj != IO_NULL);
- assert(entry->ite_object == obj);
-
- space->is_tree_hash++;
- assert(space->is_tree_hash <= space->is_tree_total);
-
- bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
- ihgb_lock(bucket);
-
- /* insert at front of bucket */
-
- entry->ite_next = bucket->ihgb_head;
- bucket->ihgb_head = entry;
-
- ihgb_unlock(bucket);
-}
-
-/*
- * Routine: ipc_hash_global_delete
- * Purpose:
- * Deletes an entry from the global reverse hash table.
- * Conditions:
- * The space must be write-locked.
- */
-
-void
-ipc_hash_global_delete(
- ipc_space_t space,
- ipc_object_t obj,
- mach_port_t name,
- ipc_tree_entry_t entry)
-{
- ipc_hash_global_bucket_t bucket;
- ipc_tree_entry_t this, *last;
-
- assert(entry->ite_name == name);
- assert(space != IS_NULL);
- assert(entry->ite_space == space);
- assert(obj != IO_NULL);
- assert(entry->ite_object == obj);
-
- assert(space->is_tree_hash > 0);
- space->is_tree_hash--;
-
- bucket = &ipc_hash_global_table[IH_GLOBAL_HASH(space, obj)];
- ihgb_lock(bucket);
-
- for (last = &bucket->ihgb_head;
- (this = *last) != ITE_NULL;
- last = &this->ite_next) {
- if (this == entry) {
- /* found it; remove from bucket */
-
- *last = this->ite_next;
- break;
- }
- }
- assert(this != ITE_NULL);
-
- ihgb_unlock(bucket);
-}
-
-/*
- * Each space has a local reverse hash table, which holds
- * entries from the space's table. In fact, the hash table
- * just uses a field (ie_index) in the table itself.
- *
- * The local hash table is an open-addressing hash table,
- * which means that when a collision occurs, instead of
- * throwing the entry into a bucket, the entry is rehashed
- * to another position in the table. In this case the rehash
- * is very simple: linear probing (ie, just increment the position).
- * This simple rehash makes deletions tractable (they're still a pain),
- * but it means that collisions tend to build up into clumps.
- *
- * Because at least one entry in the table (index 0) is always unused,
- * there will always be room in the reverse hash table. If a table
- * with n slots gets completely full, the reverse hash table will
- * have one giant clump of n-1 slots and one free slot somewhere.
- * Because entries are only entered into the reverse table if they
- * are pure send rights (not receive, send-once, port-set,
- * or dead-name rights), and free entries of course aren't entered,
- * I expect the reverse hash table won't get unreasonably full.
- *
- * Ordered hash tables (Amble & Knuth, Computer Journal, v. 17, no. 2,
- * pp. 135-142.) may be desirable here. They can dramatically help
- * unsuccessful lookups. But unsuccessful lookups are almost always
- * followed by insertions, and those slow down somewhat. They
- * also can help deletions somewhat. Successful lookups aren't affected.
- * So possibly a small win; probably nothing significant.
- */
-
-#define IH_LOCAL_HASH(obj, size) \
- ((((mach_port_index_t) (vm_offset_t) (obj)) >> 6) % (size))
-
-/*
- * Routine: ipc_hash_local_lookup
- * Purpose:
- * Converts (space, obj) -> (name, entry).
- * Looks in the space's local table, for table entries.
- * Returns TRUE if an entry was found.
- * Conditions:
- * The space must be locked (read or write) throughout.
- */
-
-boolean_t
-ipc_hash_local_lookup(
- ipc_space_t space,
- ipc_object_t obj,
- mach_port_t *namep,
- ipc_entry_t *entryp)
-{
- ipc_entry_t table;
- ipc_entry_num_t size;
- mach_port_index_t hindex, index;
-
- assert(space != IS_NULL);
- assert(obj != IO_NULL);
-
- table = space->is_table;
- size = space->is_table_size;
- hindex = IH_LOCAL_HASH(obj, size);
-
- /*
- * Ideally, table[hindex].ie_index is the name we want.
- * However, must check ie_object to verify this,
- * because collisions can happen. In case of a collision,
- * search farther along in the clump.
- */
-
- while ((index = table[hindex].ie_index) != 0) {
- ipc_entry_t entry = &table[index];
-
- if (entry->ie_object == obj) {
- *namep = MACH_PORT_MAKEB(index, entry->ie_bits);
- *entryp = entry;
- return TRUE;
- }
-
- if (++hindex == size)
- hindex = 0;
- }
-
- return FALSE;
-}
-
-/*
- * Routine: ipc_hash_local_insert
- * Purpose:
- * Inserts an entry into the space's reverse hash table.
- * Conditions:
- * The space must be write-locked.
- */
-
-void
-ipc_hash_local_insert(
- ipc_space_t space,
- ipc_object_t obj,
- mach_port_index_t index,
- ipc_entry_t entry)
-{
- ipc_entry_t table;
- ipc_entry_num_t size;
- mach_port_index_t hindex;
-
- assert(index != 0);
- assert(space != IS_NULL);
- assert(obj != IO_NULL);
-
- table = space->is_table;
- size = space->is_table_size;
- hindex = IH_LOCAL_HASH(obj, size);
-
- assert(entry == &table[index]);
- assert(entry->ie_object == obj);
-
- /*
- * We want to insert at hindex, but there may be collisions.
- * If a collision occurs, search for the end of the clump
- * and insert there.
- */
-
- while (table[hindex].ie_index != 0) {
- if (++hindex == size)
- hindex = 0;
- }
-
- table[hindex].ie_index = index;
-}
-
-/*
- * Routine: ipc_hash_local_delete
- * Purpose:
- * Deletes an entry from the space's reverse hash table.
- * Conditions:
- * The space must be write-locked.
- */
-
-void
-ipc_hash_local_delete(
- ipc_space_t space,
- ipc_object_t obj,
- mach_port_index_t index,
- ipc_entry_t entry)
-{
- ipc_entry_t table;
- ipc_entry_num_t size;
- mach_port_index_t hindex, dindex;
-
- assert(index != MACH_PORT_NULL);
- assert(space != IS_NULL);
- assert(obj != IO_NULL);
-
- table = space->is_table;
- size = space->is_table_size;
- hindex = IH_LOCAL_HASH(obj, size);
-
- assert(entry == &table[index]);
- assert(entry->ie_object == obj);
-
- /*
- * First check we have the right hindex for this index.
- * In case of collision, we have to search farther
- * along in this clump.
- */
-
- while (table[hindex].ie_index != index) {
- if (table[hindex].ie_index == 0)
- {
- static int gak = 0;
- if (gak == 0)
- {
- printf("gak! entry wasn't in hash table!\n");
- gak = 1;
- }
- return;
- }
- if (++hindex == size)
- hindex = 0;
- }
-
- /*
- * Now we want to set table[hindex].ie_index = 0.
- * But if we aren't the last index in a clump,
- * this might cause problems for lookups of objects
- * farther along in the clump that are displaced
- * due to collisions. Searches for them would fail
- * at hindex instead of succeeding.
- *
- * So we must check the clump after hindex for objects
- * that are so displaced, and move one up to the new hole.
- *
- * hindex - index of new hole in the clump
- * dindex - index we are checking for a displaced object
- *
- * When we move a displaced object up into the hole,
- * it creates a new hole, and we have to repeat the process
- * until we get to the end of the clump.
- */
-
- for (dindex = hindex; index != 0; hindex = dindex) {
- for (;;) {
- mach_port_index_t tindex;
- ipc_object_t tobj;
-
- if (++dindex == size)
- dindex = 0;
- assert(dindex != hindex);
-
- /* are we at the end of the clump? */
-
- index = table[dindex].ie_index;
- if (index == 0)
- break;
-
- /* is this a displaced object? */
-
- tobj = table[index].ie_object;
- assert(tobj != IO_NULL);
- tindex = IH_LOCAL_HASH(tobj, size);
-
- if ((dindex < hindex) ?
- ((dindex < tindex) && (tindex <= hindex)) :
- ((dindex < tindex) || (tindex <= hindex)))
- break;
- }
-
- table[hindex].ie_index = index;
- }
-}
-
-/*
- * Routine: ipc_hash_init
- * Purpose:
- * Initialize the reverse hash table implementation.
- */
-
-void
-ipc_hash_init(void)
-{
- ipc_hash_index_t i;
-
- /* initialize ipc_hash_global_size */
-
- ipc_hash_global_size = IPC_HASH_GLOBAL_SIZE;
-
- /* make sure it is a power of two */
-
- ipc_hash_global_mask = ipc_hash_global_size - 1;
- if ((ipc_hash_global_size & ipc_hash_global_mask) != 0) {
- natural_t bit;
-
- /* round up to closest power of two */
-
- for (bit = 1;; bit <<= 1) {
- ipc_hash_global_mask |= bit;
- ipc_hash_global_size = ipc_hash_global_mask + 1;
-
- if ((ipc_hash_global_size & ipc_hash_global_mask) == 0)
- break;
- }
- }
-
- /* allocate ipc_hash_global_table */
-
- ipc_hash_global_table = (ipc_hash_global_bucket_t)
- kalloc((vm_size_t) (ipc_hash_global_size *
- sizeof(struct ipc_hash_global_bucket)));
- assert(ipc_hash_global_table != IHGB_NULL);
-
- /* and initialize it */
-
- for (i = 0; i < ipc_hash_global_size; i++) {
- ipc_hash_global_bucket_t bucket;
-
- bucket = &ipc_hash_global_table[i];
- ihgb_lock_init(bucket);
- bucket->ihgb_head = ITE_NULL;
- }
-}
-
-#if MACH_IPC_DEBUG
-
-/*
- * Routine: ipc_hash_info
- * Purpose:
- * Return information about the global reverse hash table.
- * Fills the buffer with as much information as possible
- * and returns the desired size of the buffer.
- * Conditions:
- * Nothing locked. The caller should provide
- * possibly-pageable memory.
- */
-
-
-ipc_hash_index_t
-ipc_hash_info(
- hash_info_bucket_t *info,
- mach_msg_type_number_t count)
-{
- ipc_hash_index_t i;
-
- if (ipc_hash_global_size < count)
- count = ipc_hash_global_size;
-
- for (i = 0; i < count; i++) {
- ipc_hash_global_bucket_t bucket = &ipc_hash_global_table[i];
- unsigned int bucket_count = 0;
- ipc_tree_entry_t entry;
-
- ihgb_lock(bucket);
- for (entry = bucket->ihgb_head;
- entry != ITE_NULL;
- entry = entry->ite_next)
- bucket_count++;
- ihgb_unlock(bucket);
-
- /* don't touch pageable memory while holding locks */
- info[i].hib_count = bucket_count;
- }
-
- return ipc_hash_global_size;
-}
-
-#endif /* MACH_IPC_DEBUG */
diff --git a/ipc/ipc_hash.h b/ipc/ipc_hash.h
deleted file mode 100644
index 929ba77d..00000000
--- a/ipc/ipc_hash.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/*
- * Mach Operating System
- * Copyright (c) 1991,1990,1989 Carnegie Mellon University
- * All Rights Reserved.
- *
- * Permission to use, copy, modify and distribute this software and its
- * documentation is hereby granted, provided that both the copyright
- * notice and this permission notice appear in all copies of the
- * software, derivative works or modified versions, and any portions
- * thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
- * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
- * Carnegie Mellon requests users of this software to return to
- *
- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
- * School of Computer Science
- * Carnegie Mellon University
- * Pittsburgh PA 15213-3890
- *
- * any improvements or extensions that they make and grant Carnegie Mellon
- * the rights to redistribute these changes.
- */
-/*
- * File: ipc/ipc_hash.h
- * Author: Rich Draves
- * Date: 1989
- *
- * Declarations of entry hash table operations.
- */
-
-#ifndef _IPC_IPC_HASH_H_
-#define _IPC_IPC_HASH_H_
-
-#include <mach/boolean.h>
-#include <mach/kern_return.h>
-
-typedef natural_t ipc_hash_index_t;
-
-extern void
-ipc_hash_init(void);
-
-#if MACH_IPC_DEBUG
-
-extern ipc_hash_index_t
-ipc_hash_info(hash_info_bucket_t *, mach_msg_type_number_t);
-
-#endif /* MACH_IPC_DEBUG */
-
-extern boolean_t
-ipc_hash_lookup(ipc_space_t space, ipc_object_t obj,
- mach_port_t *namep, ipc_entry_t *entryp);
-
-extern void
-ipc_hash_insert(ipc_space_t space, ipc_object_t obj,
- mach_port_t name, ipc_entry_t entry);
-
-extern void
-ipc_hash_delete(ipc_space_t space, ipc_object_t obj,
- mach_port_t name, ipc_entry_t entry);
-
-/*
- * For use by functions that know what they're doing:
- * the global primitives, for splay tree entries,
- * and the local primitives, for table entries.
- */
-
-#define IPC_HASH_GLOBAL_SIZE 256
-
-extern boolean_t
-ipc_hash_global_lookup(ipc_space_t space, ipc_object_t obj,
- mach_port_t *namep, ipc_tree_entry_t *entryp);
-
-extern void
-ipc_hash_global_insert(ipc_space_t space, ipc_object_t obj,
- mach_port_t name, ipc_tree_entry_t entry);
-
-extern void
-ipc_hash_global_delete(ipc_space_t space, ipc_object_t obj,
- mach_port_t name, ipc_tree_entry_t entry);
-
-extern boolean_t
-ipc_hash_local_lookup(ipc_space_t space, ipc_object_t obj,
- mach_port_t *namep, ipc_entry_t *entryp);
-
-extern void
-ipc_hash_local_insert(ipc_space_t space, ipc_object_t obj,
- mach_port_index_t index, ipc_entry_t entry);
-
-extern void
-ipc_hash_local_delete(ipc_space_t space, ipc_object_t obj,
- mach_port_index_t index, ipc_entry_t entry);
-
-#endif /* _IPC_IPC_HASH_H_ */
diff --git a/ipc/ipc_init.c b/ipc/ipc_init.c
index ca7e7912..5ed800f4 100644
--- a/ipc/ipc_init.c
+++ b/ipc/ipc_init.c
@@ -47,14 +47,13 @@
#include <ipc/ipc_marequest.h>
#include <ipc/ipc_notify.h>
#include <ipc/ipc_kmsg.h>
-#include <ipc/ipc_hash.h>
#include <ipc/ipc_init.h>
static struct vm_map ipc_kernel_map_store;
vm_map_t ipc_kernel_map = &ipc_kernel_map_store;
-vm_size_t ipc_kernel_map_size = 8 * 1024 * 1024;
+const vm_size_t ipc_kernel_map_size = 8 * 1024 * 1024;
/*
* Routine: ipc_bootstrap
@@ -74,16 +73,16 @@ ipc_bootstrap(void)
ipc_port_timestamp_data = 0;
kmem_cache_init(&ipc_space_cache, "ipc_space",
- sizeof(struct ipc_space), 0, NULL, NULL, NULL, 0);
+ sizeof(struct ipc_space), 0, NULL, 0);
- kmem_cache_init(&ipc_tree_entry_cache, "ipc_tree_entry",
- sizeof(struct ipc_tree_entry), 0, NULL, NULL, NULL, 0);
+ kmem_cache_init(&ipc_entry_cache, "ipc_entry",
+ sizeof(struct ipc_entry), 0, NULL, 0);
kmem_cache_init(&ipc_object_caches[IOT_PORT], "ipc_port",
- sizeof(struct ipc_port), 0, NULL, NULL, NULL, 0);
+ sizeof(struct ipc_port), 0, NULL, 0);
kmem_cache_init(&ipc_object_caches[IOT_PORT_SET], "ipc_pset",
- sizeof(struct ipc_pset), 0, NULL, NULL, NULL, 0);
+ sizeof(struct ipc_pset), 0, NULL, 0);
/* create special spaces */
@@ -97,7 +96,6 @@ ipc_bootstrap(void)
ipc_table_init();
ipc_notify_init();
- ipc_hash_init();
ipc_marequest_init();
}
@@ -108,7 +106,7 @@ ipc_bootstrap(void)
*/
void
-ipc_init()
+ipc_init(void)
{
vm_offset_t min, max;
diff --git a/ipc/ipc_kmsg.c b/ipc/ipc_kmsg.c
index 3ad274d0..5076809e 100644
--- a/ipc/ipc_kmsg.c
+++ b/ipc/ipc_kmsg.c
@@ -50,7 +50,6 @@
#include <vm/vm_user.h>
#include <ipc/port.h>
#include <ipc/ipc_entry.h>
-#include <ipc/ipc_hash.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_thread.h>
#include <ipc/ipc_marequest.h>
@@ -69,10 +68,6 @@
#include <ipc/ipc_print.h>
#endif
-extern int copyinmap();
-extern int copyoutmap();
-void ipc_msg_print(); /* forward */
-
#define is_misaligned(x) ( ((vm_offset_t)(x)) & (sizeof(vm_offset_t)-1) )
#define ptr_align(x) \
( ( ((vm_offset_t)(x)) + (sizeof(vm_offset_t)-1) ) & ~(sizeof(vm_offset_t)-1) )
@@ -143,9 +138,7 @@ ipc_kmsg_rmqueue(
next->ikm_prev = prev;
prev->ikm_next = next;
}
- /* XXX Temporary debug logic */
- kmsg->ikm_next = IKM_BOGUS;
- kmsg->ikm_prev = IKM_BOGUS;
+ ikm_mark_bogus (kmsg);
}
/*
@@ -222,9 +215,9 @@ ipc_kmsg_destroy(
*/
void
-ipc_kmsg_clean_body(saddr, eaddr)
- vm_offset_t saddr;
- vm_offset_t eaddr;
+ipc_kmsg_clean_body(
+ vm_offset_t saddr,
+ vm_offset_t eaddr)
{
while (saddr < eaddr) {
mach_msg_type_long_t *type;
@@ -321,8 +314,7 @@ ipc_kmsg_clean_body(saddr, eaddr)
*/
void
-ipc_kmsg_clean(kmsg)
- ipc_kmsg_t kmsg;
+ipc_kmsg_clean(ipc_kmsg_t kmsg)
{
ipc_marequest_t marequest;
ipc_object_t object;
@@ -365,11 +357,11 @@ ipc_kmsg_clean(kmsg)
*/
void
-ipc_kmsg_clean_partial(kmsg, eaddr, dolast, number)
- ipc_kmsg_t kmsg;
- vm_offset_t eaddr;
- boolean_t dolast;
- mach_msg_type_number_t number;
+ipc_kmsg_clean_partial(
+ ipc_kmsg_t kmsg,
+ vm_offset_t eaddr,
+ boolean_t dolast,
+ mach_msg_type_number_t number)
{
ipc_object_t object;
mach_msg_bits_t mbits = kmsg->ikm_header.msgh_bits;
@@ -470,8 +462,7 @@ xxx: type = (mach_msg_type_long_t *) eaddr;
*/
void
-ipc_kmsg_free(kmsg)
- ipc_kmsg_t kmsg;
+ipc_kmsg_free(ipc_kmsg_t kmsg)
{
vm_size_t size = kmsg->ikm_size;
@@ -504,10 +495,10 @@ ipc_kmsg_free(kmsg)
*/
mach_msg_return_t
-ipc_kmsg_get(msg, size, kmsgp)
- mach_msg_header_t *msg;
- mach_msg_size_t size;
- ipc_kmsg_t *kmsgp;
+ipc_kmsg_get(
+ mach_msg_header_t *msg,
+ mach_msg_size_t size,
+ ipc_kmsg_t *kmsgp)
{
ipc_kmsg_t kmsg;
@@ -556,10 +547,10 @@ ipc_kmsg_get(msg, size, kmsgp)
*/
extern mach_msg_return_t
-ipc_kmsg_get_from_kernel(msg, size, kmsgp)
- mach_msg_header_t *msg;
- mach_msg_size_t size;
- ipc_kmsg_t *kmsgp;
+ipc_kmsg_get_from_kernel(
+ mach_msg_header_t *msg,
+ mach_msg_size_t size,
+ ipc_kmsg_t *kmsgp)
{
ipc_kmsg_t kmsg;
@@ -593,10 +584,10 @@ ipc_kmsg_get_from_kernel(msg, size, kmsgp)
*/
mach_msg_return_t
-ipc_kmsg_put(msg, kmsg, size)
- mach_msg_header_t *msg;
- ipc_kmsg_t kmsg;
- mach_msg_size_t size;
+ipc_kmsg_put(
+ mach_msg_header_t *msg,
+ ipc_kmsg_t kmsg,
+ mach_msg_size_t size)
{
mach_msg_return_t mr;
@@ -678,10 +669,10 @@ ipc_kmsg_put_to_kernel(
*/
mach_msg_return_t
-ipc_kmsg_copyin_header(msg, space, notify)
- mach_msg_header_t *msg;
- ipc_space_t space;
- mach_port_t notify;
+ipc_kmsg_copyin_header(
+ mach_msg_header_t *msg,
+ ipc_space_t space,
+ mach_port_t notify)
{
mach_msg_bits_t mbits = msg->msgh_bits &~ MACH_MSGH_BITS_CIRCULAR;
mach_port_t dest_name = msg->msgh_remote_port;
@@ -706,24 +697,14 @@ ipc_kmsg_copyin_header(msg, space, notify)
if (!space->is_active)
goto abort_async;
- /* optimized ipc_entry_lookup */
-
- {
- mach_port_index_t index = MACH_PORT_INDEX(dest_name);
- mach_port_gen_t gen = MACH_PORT_GEN(dest_name);
-
- if (index >= space->is_table_size)
+ entry = ipc_entry_lookup (space, dest_name);
+ if (entry == IE_NULL)
goto abort_async;
-
- entry = &space->is_table[index];
bits = entry->ie_bits;
- /* check generation number and type bit */
-
- if ((bits & (IE_BITS_GEN_MASK|MACH_PORT_TYPE_SEND)) !=
- (gen | MACH_PORT_TYPE_SEND))
+ /* check type bits */
+ if (IE_BITS_TYPE (bits) != MACH_PORT_TYPE_SEND)
goto abort_async;
- }
/* optimized ipc_right_copyin */
@@ -758,8 +739,6 @@ ipc_kmsg_copyin_header(msg, space, notify)
case MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
MACH_MSG_TYPE_MAKE_SEND_ONCE): {
- ipc_entry_num_t size;
- ipc_entry_t table;
ipc_entry_t entry;
ipc_entry_bits_t bits;
ipc_port_t dest_port, reply_port;
@@ -770,51 +749,28 @@ ipc_kmsg_copyin_header(msg, space, notify)
if (!space->is_active)
goto abort_request;
- size = space->is_table_size;
- table = space->is_table;
-
- /* optimized ipc_entry_lookup of dest_name */
-
- {
- mach_port_index_t index = MACH_PORT_INDEX(dest_name);
- mach_port_gen_t gen = MACH_PORT_GEN(dest_name);
-
- if (index >= size)
+ entry = ipc_entry_lookup (space, dest_name);
+ if (entry == IE_NULL)
goto abort_request;
-
- entry = &table[index];
bits = entry->ie_bits;
- /* check generation number and type bit */
-
- if ((bits & (IE_BITS_GEN_MASK|MACH_PORT_TYPE_SEND)) !=
- (gen | MACH_PORT_TYPE_SEND))
+ /* check type bits */
+ if (IE_BITS_TYPE (bits) != MACH_PORT_TYPE_SEND)
goto abort_request;
- }
assert(IE_BITS_UREFS(bits) > 0);
dest_port = (ipc_port_t) entry->ie_object;
assert(dest_port != IP_NULL);
- /* optimized ipc_entry_lookup of reply_name */
-
- {
- mach_port_index_t index = MACH_PORT_INDEX(reply_name);
- mach_port_gen_t gen = MACH_PORT_GEN(reply_name);
-
- if (index >= size)
+ entry = ipc_entry_lookup (space, reply_name);
+ if (entry == IE_NULL)
goto abort_request;
-
- entry = &table[index];
bits = entry->ie_bits;
- /* check generation number and type bit */
-
- if ((bits & (IE_BITS_GEN_MASK|MACH_PORT_TYPE_RECEIVE)) !=
- (gen | MACH_PORT_TYPE_RECEIVE))
+ /* check type bits */
+ if (IE_BITS_TYPE (bits) != MACH_PORT_TYPE_RECEIVE)
goto abort_request;
- }
reply_port = (ipc_port_t) entry->ie_object;
assert(reply_port != IP_NULL);
@@ -861,9 +817,6 @@ ipc_kmsg_copyin_header(msg, space, notify)
}
case MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0): {
- mach_port_index_t index;
- mach_port_gen_t gen;
- ipc_entry_t table;
ipc_entry_t entry;
ipc_entry_bits_t bits;
ipc_port_t dest_port;
@@ -877,24 +830,13 @@ ipc_kmsg_copyin_header(msg, space, notify)
if (!space->is_active)
goto abort_reply;
- /* optimized ipc_entry_lookup */
-
- table = space->is_table;
-
- index = MACH_PORT_INDEX(dest_name);
- gen = MACH_PORT_GEN(dest_name);
-
- if (index >= space->is_table_size)
+ entry = ipc_entry_lookup (space, dest_name);
+ if (entry == IE_NULL)
goto abort_reply;
-
- entry = &table[index];
bits = entry->ie_bits;
- /* check generation number, collision bit, and type bit */
-
- if ((bits & (IE_BITS_GEN_MASK|IE_BITS_COLLISION|
- MACH_PORT_TYPE_SEND_ONCE)) !=
- (gen | MACH_PORT_TYPE_SEND_ONCE))
+ /* check type bits */
+ if (IE_BITS_TYPE (bits) != MACH_PORT_TYPE_SEND_ONCE)
goto abort_reply;
/* optimized ipc_right_copyin */
@@ -918,12 +860,8 @@ ipc_kmsg_copyin_header(msg, space, notify)
assert(dest_port->ip_sorights > 0);
ip_unlock(dest_port);
- /* optimized ipc_entry_dealloc */
-
- entry->ie_next = table->ie_next;
- table->ie_next = index;
- entry->ie_bits = gen;
entry->ie_object = IO_NULL;
+ ipc_entry_dealloc (space, dest_name, entry);
is_write_unlock(space);
msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
@@ -1343,10 +1281,10 @@ ipc_kmsg_copyin_header(msg, space, notify)
}
mach_msg_return_t
-ipc_kmsg_copyin_body(kmsg, space, map)
- ipc_kmsg_t kmsg;
- ipc_space_t space;
- vm_map_t map;
+ipc_kmsg_copyin_body(
+ ipc_kmsg_t kmsg,
+ ipc_space_t space,
+ vm_map_t map)
{
ipc_object_t dest;
vm_offset_t saddr, eaddr;
@@ -1563,11 +1501,11 @@ ipc_kmsg_copyin_body(kmsg, space, map)
*/
mach_msg_return_t
-ipc_kmsg_copyin(kmsg, space, map, notify)
- ipc_kmsg_t kmsg;
- ipc_space_t space;
- vm_map_t map;
- mach_port_t notify;
+ipc_kmsg_copyin(
+ ipc_kmsg_t kmsg,
+ ipc_space_t space,
+ vm_map_t map,
+ mach_port_t notify)
{
mach_msg_return_t mr;
@@ -1598,8 +1536,7 @@ ipc_kmsg_copyin(kmsg, space, map, notify)
*/
void
-ipc_kmsg_copyin_from_kernel(
- ipc_kmsg_t kmsg)
+ipc_kmsg_copyin_from_kernel(ipc_kmsg_t kmsg)
{
mach_msg_bits_t bits = kmsg->ikm_header.msgh_bits;
mach_msg_type_name_t rname = MACH_MSGH_BITS_REMOTE(bits);
@@ -1758,10 +1695,10 @@ ipc_kmsg_copyin_from_kernel(
*/
mach_msg_return_t
-ipc_kmsg_copyout_header(msg, space, notify)
- mach_msg_header_t *msg;
- ipc_space_t space;
- mach_port_t notify;
+ipc_kmsg_copyout_header(
+ mach_msg_header_t *msg,
+ ipc_space_t space,
+ mach_port_t notify)
{
mach_msg_bits_t mbits = msg->msgh_bits;
ipc_port_t dest = (ipc_port_t) msg->msgh_remote_port;
@@ -1775,6 +1712,7 @@ ipc_kmsg_copyout_header(msg, space, notify)
case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0): {
mach_port_t dest_name;
ipc_port_t nsrequest;
+ unsigned long payload;
/* receiving an asynchronous message */
@@ -1793,6 +1731,7 @@ ipc_kmsg_copyout_header(msg, space, notify)
dest_name = dest->ip_receiver_name;
else
dest_name = MACH_PORT_NULL;
+ payload = dest->ip_protected_payload;
if ((--dest->ip_srights == 0) &&
((nsrequest = dest->ip_nsrequest) != IP_NULL)) {
@@ -1806,21 +1745,27 @@ ipc_kmsg_copyout_header(msg, space, notify)
} else
ip_unlock(dest);
- msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
- MACH_MSGH_BITS(0, MACH_MSG_TYPE_PORT_SEND));
- msg->msgh_local_port = dest_name;
+ if (! ipc_port_flag_protected_payload(dest)) {
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(0, MACH_MSG_TYPE_PORT_SEND));
+ msg->msgh_local_port = dest_name;
+ } else {
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(
+ 0, MACH_MSG_TYPE_PROTECTED_PAYLOAD));
+ msg->msgh_protected_payload = payload;
+ }
msg->msgh_remote_port = MACH_PORT_NULL;
return MACH_MSG_SUCCESS;
}
case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND,
MACH_MSG_TYPE_PORT_SEND_ONCE): {
- ipc_entry_t table;
- mach_port_index_t index;
ipc_entry_t entry;
ipc_port_t reply = (ipc_port_t) msg->msgh_local_port;
mach_port_t dest_name, reply_name;
ipc_port_t nsrequest;
+ unsigned long payload;
/* receiving a request message */
@@ -1828,8 +1773,7 @@ ipc_kmsg_copyout_header(msg, space, notify)
break;
is_write_lock(space);
- if (!space->is_active ||
- ((index = (table = space->is_table)->ie_next) == 0)) {
+ if (!space->is_active || space->is_free_list == NULL) {
is_write_unlock(space);
break;
}
@@ -1859,11 +1803,14 @@ ipc_kmsg_copyout_header(msg, space, notify)
assert(reply->ip_sorights > 0);
ip_unlock(reply);
- /* optimized ipc_entry_get */
-
- entry = &table[index];
- table->ie_next = entry->ie_next;
- entry->ie_request = 0;
+ kern_return_t kr;
+ kr = ipc_entry_get (space, &reply_name, &entry);
+ if (kr) {
+ ip_unlock(reply);
+ ip_unlock(dest);
+ is_write_unlock(space);
+ break;
+ }
{
mach_port_gen_t gen;
@@ -1871,8 +1818,6 @@ ipc_kmsg_copyout_header(msg, space, notify)
assert((entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
gen = entry->ie_bits + IE_BITS_GEN_ONE;
- reply_name = MACH_PORT_MAKE(index, gen);
-
/* optimized ipc_right_copyout */
entry->ie_bits = gen | (MACH_PORT_TYPE_SEND_ONCE | 1);
@@ -1891,6 +1836,7 @@ ipc_kmsg_copyout_header(msg, space, notify)
dest_name = dest->ip_receiver_name;
else
dest_name = MACH_PORT_NULL;
+ payload = dest->ip_protected_payload;
if ((--dest->ip_srights == 0) &&
((nsrequest = dest->ip_nsrequest) != IP_NULL)) {
@@ -1904,16 +1850,24 @@ ipc_kmsg_copyout_header(msg, space, notify)
} else
ip_unlock(dest);
- msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
- MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
- MACH_MSG_TYPE_PORT_SEND));
- msg->msgh_local_port = dest_name;
+ if (! ipc_port_flag_protected_payload(dest)) {
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
+ MACH_MSG_TYPE_PORT_SEND));
+ msg->msgh_local_port = dest_name;
+ } else {
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
+ MACH_MSG_TYPE_PROTECTED_PAYLOAD));
+ msg->msgh_protected_payload = payload;
+ }
msg->msgh_remote_port = reply_name;
return MACH_MSG_SUCCESS;
}
case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
mach_port_t dest_name;
+ unsigned long payload;
/* receiving a reply message */
@@ -1927,6 +1881,8 @@ ipc_kmsg_copyout_header(msg, space, notify)
assert(dest->ip_sorights > 0);
+ payload = dest->ip_protected_payload;
+
if (dest->ip_receiver == space) {
ip_release(dest);
dest->ip_sorights--;
@@ -1939,9 +1895,17 @@ ipc_kmsg_copyout_header(msg, space, notify)
dest_name = MACH_PORT_NULL;
}
- msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
- MACH_MSGH_BITS(0, MACH_MSG_TYPE_PORT_SEND_ONCE));
- msg->msgh_local_port = dest_name;
+ if (! ipc_port_flag_protected_payload(dest)) {
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(0,
+ MACH_MSG_TYPE_PORT_SEND_ONCE));
+ msg->msgh_local_port = dest_name;
+ } else {
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(0,
+ MACH_MSG_TYPE_PROTECTED_PAYLOAD));
+ msg->msgh_protected_payload = payload;
+ }
msg->msgh_remote_port = MACH_PORT_NULL;
return MACH_MSG_SUCCESS;
}
@@ -1957,6 +1921,7 @@ ipc_kmsg_copyout_header(msg, space, notify)
mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
ipc_port_t reply = (ipc_port_t) msg->msgh_local_port;
mach_port_t dest_name, reply_name;
+ unsigned long payload;
if (IP_VALID(reply)) {
ipc_port_t notify_port;
@@ -2037,28 +2002,20 @@ ipc_kmsg_copyout_header(msg, space, notify)
goto copyout_dest;
}
- kr = ipc_entry_get(space, &reply_name, &entry);
+ kr = ipc_entry_alloc(space, &reply_name, &entry);
if (kr != KERN_SUCCESS) {
ip_unlock(reply);
if (notify_port != IP_NULL)
ipc_port_release_sonce(notify_port);
- /* space is locked */
- kr = ipc_entry_grow_table(space);
- if (kr != KERN_SUCCESS) {
- /* space is unlocked */
-
- if (kr == KERN_RESOURCE_SHORTAGE)
- return (MACH_RCV_HEADER_ERROR|
- MACH_MSG_IPC_KERNEL);
- else
- return (MACH_RCV_HEADER_ERROR|
- MACH_MSG_IPC_SPACE);
- }
- /* space is locked again; start over */
-
- continue;
+ is_write_unlock(space);
+ if (kr == KERN_RESOURCE_SHORTAGE)
+ return (MACH_RCV_HEADER_ERROR|
+ MACH_MSG_IPC_KERNEL);
+ else
+ return (MACH_RCV_HEADER_ERROR|
+ MACH_MSG_IPC_SPACE);
}
assert(IE_BITS_TYPE(entry->ie_bits)
@@ -2203,6 +2160,7 @@ ipc_kmsg_copyout_header(msg, space, notify)
*/
copyout_dest:
+ payload = dest->ip_protected_payload;
if (ip_active(dest)) {
ipc_object_copyout_dest(space, (ipc_object_t) dest,
@@ -2231,9 +2189,17 @@ ipc_kmsg_copyout_header(msg, space, notify)
if (IP_VALID(reply))
ipc_port_release(reply);
- msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
- MACH_MSGH_BITS(reply_type, dest_type));
- msg->msgh_local_port = dest_name;
+ if (! ipc_port_flag_protected_payload(dest)) {
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(reply_type, dest_type));
+ msg->msgh_local_port = dest_name;
+ } else {
+ msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ MACH_MSGH_BITS(reply_type,
+ MACH_MSG_TYPE_PROTECTED_PAYLOAD));
+ msg->msgh_protected_payload = payload;
+ }
+
msg->msgh_remote_port = reply_name;
}
@@ -2258,11 +2224,11 @@ ipc_kmsg_copyout_header(msg, space, notify)
*/
mach_msg_return_t
-ipc_kmsg_copyout_object(space, object, msgt_name, namep)
- ipc_space_t space;
- ipc_object_t object;
- mach_msg_type_name_t msgt_name;
- mach_port_t *namep;
+ipc_kmsg_copyout_object(
+ ipc_space_t space,
+ ipc_object_t object,
+ mach_msg_type_name_t msgt_name,
+ mach_port_t *namep)
{
if (!IO_VALID(object)) {
*namep = (mach_port_t) object;
@@ -2280,7 +2246,7 @@ ipc_kmsg_copyout_object(space, object, msgt_name, namep)
goto slow_copyout;
{
- register ipc_port_t port = (ipc_port_t) object;
+ ipc_port_t port = (ipc_port_t) object;
ipc_entry_t entry;
is_write_lock(space);
@@ -2291,12 +2257,13 @@ ipc_kmsg_copyout_object(space, object, msgt_name, namep)
ip_lock(port);
if (!ip_active(port) ||
- !ipc_hash_local_lookup(space, (ipc_object_t) port,
- namep, &entry)) {
+ (entry = ipc_reverse_lookup(space,
+ (ipc_object_t) port)) == NULL) {
ip_unlock(port);
is_write_unlock(space);
goto slow_copyout;
}
+ *namep = entry->ie_name;
/*
* Copyout the send right, incrementing urefs
@@ -2313,7 +2280,7 @@ ipc_kmsg_copyout_object(space, object, msgt_name, namep)
assert(IE_BITS_UREFS(entry->ie_bits) < MACH_PORT_UREFS_MAX);
{
- register ipc_entry_bits_t bits = entry->ie_bits + 1;
+ ipc_entry_bits_t bits = entry->ie_bits + 1;
if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX)
entry->ie_bits = bits;
@@ -2368,10 +2335,11 @@ ipc_kmsg_copyout_object(space, object, msgt_name, namep)
*/
mach_msg_return_t
-ipc_kmsg_copyout_body(saddr, eaddr, space, map)
- vm_offset_t saddr, eaddr;
- ipc_space_t space;
- vm_map_t map;
+ipc_kmsg_copyout_body(
+ vm_offset_t saddr,
+ vm_offset_t eaddr,
+ ipc_space_t space,
+ vm_map_t map)
{
mach_msg_return_t mr = MACH_MSG_SUCCESS;
kern_return_t kr;
@@ -2520,11 +2488,11 @@ ipc_kmsg_copyout_body(saddr, eaddr, space, map)
*/
mach_msg_return_t
-ipc_kmsg_copyout(kmsg, space, map, notify)
- ipc_kmsg_t kmsg;
- ipc_space_t space;
- vm_map_t map;
- mach_port_t notify;
+ipc_kmsg_copyout(
+ ipc_kmsg_t kmsg,
+ ipc_space_t space,
+ vm_map_t map,
+ mach_port_t notify)
{
mach_msg_bits_t mbits = kmsg->ikm_header.msgh_bits;
mach_msg_return_t mr;
@@ -2614,9 +2582,9 @@ ipc_kmsg_copyout_pseudo(
*/
void
-ipc_kmsg_copyout_dest(kmsg, space)
- ipc_kmsg_t kmsg;
- ipc_space_t space;
+ipc_kmsg_copyout_dest(
+ ipc_kmsg_t kmsg,
+ ipc_space_t space)
{
mach_msg_bits_t mbits = kmsg->ikm_header.msgh_bits;
ipc_object_t dest = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
@@ -2662,9 +2630,9 @@ ipc_kmsg_copyout_dest(kmsg, space)
#if MACH_KDB
char *
-ipc_type_name(type_name, received)
- int type_name;
- boolean_t received;
+ipc_type_name(
+ int type_name,
+ boolean_t received)
{
switch (type_name) {
case MACH_MSG_TYPE_BOOLEAN:
@@ -2745,8 +2713,7 @@ ipc_print_type_name(
* ipc_kmsg_print [ debug ]
*/
void
-ipc_kmsg_print(kmsg)
- ipc_kmsg_t kmsg;
+ipc_kmsg_print(ipc_kmsg_t kmsg)
{
db_printf("kmsg=0x%x\n", kmsg);
db_printf("ikm_next=0x%x,prev=0x%x,size=%d,marequest=0x%x",
@@ -2762,8 +2729,7 @@ ipc_kmsg_print(kmsg)
* ipc_msg_print [ debug ]
*/
void
-ipc_msg_print(msgh)
- mach_msg_header_t *msgh;
+ipc_msg_print(mach_msg_header_t *msgh)
{
vm_offset_t saddr, eaddr;
diff --git a/ipc/ipc_kmsg.h b/ipc/ipc_kmsg.h
index 8867310d..393c0392 100644
--- a/ipc/ipc_kmsg.h
+++ b/ipc/ipc_kmsg.h
@@ -38,7 +38,7 @@
#include <mach/message.h>
#include <kern/assert.h>
#include <kern/cpu_number.h>
-#include <kern/macro_help.h>
+#include <kern/macros.h>
#include <kern/kalloc.h>
#include <ipc/ipc_marequest.h>
#include <ipc/ipc_object.h>
@@ -72,11 +72,24 @@ typedef struct ipc_kmsg {
#define ikm_plus_overhead(size) ((vm_size_t)((size) + IKM_OVERHEAD))
#define ikm_less_overhead(size) ((mach_msg_size_t)((size) - IKM_OVERHEAD))
+#if MACH_IPC_TEST
/*
- * XXX For debugging.
+ * For debugging.
*/
#define IKM_BOGUS ((ipc_kmsg_t) 0xffffff10)
+#define ikm_mark_bogus(kmsg) \
+MACRO_BEGIN \
+ (kmsg)->ikm_next = IKM_BOGUS; \
+ (kmsg)->ikm_prev = IKM_BOGUS; \
+MACRO_END
+
+#else /* MACH_IPC_TEST */
+
+#define ikm_mark_bogus(kmsg) ;
+
+#endif /* MACH_IPC_TEST */
+
/*
* We keep a per-processor cache of kernel message buffers.
* The cache saves the overhead/locking of using kalloc/kfree.
@@ -92,9 +105,12 @@ extern ipc_kmsg_t ipc_kmsg_cache[NCPUS];
/*
* The size of the kernel message buffers that will be cached.
* IKM_SAVED_KMSG_SIZE includes overhead; IKM_SAVED_MSG_SIZE doesn't.
+ *
+ * We use the page size for IKM_SAVED_KMSG_SIZE to make sure the
+ * page is pinned to a single processor.
*/
-#define IKM_SAVED_KMSG_SIZE ((vm_size_t) 256)
+#define IKM_SAVED_KMSG_SIZE PAGE_SIZE
#define IKM_SAVED_MSG_SIZE ikm_less_overhead(IKM_SAVED_KMSG_SIZE)
#define ikm_alloc(size) \
@@ -140,8 +156,7 @@ MACRO_BEGIN \
MACRO_END
/*
- * struct ipc_kmsg_queue is defined in kern/thread.h instead of here,
- * so that kern/thread.h doesn't have to include ipc/ipc_kmsg.h.
+ * struct ipc_kmsg_queue is defined in ipc/ipc_kmsg_queue.h
*/
#include <ipc/ipc_kmsg_queue.h>
@@ -196,9 +211,7 @@ MACRO_BEGIN \
_next->ikm_prev = _prev; \
_prev->ikm_next = _next; \
} \
- /* XXX Debug paranoia */ \
- kmsg->ikm_next = IKM_BOGUS; \
- kmsg->ikm_prev = IKM_BOGUS; \
+ ikm_mark_bogus (kmsg); \
MACRO_END
#define ipc_kmsg_enqueue_macro(queue, kmsg) \
diff --git a/ipc/ipc_kmsg_queue.h b/ipc/ipc_kmsg_queue.h
index 51ccbe24..b4b3df1d 100644
--- a/ipc/ipc_kmsg_queue.h
+++ b/ipc/ipc_kmsg_queue.h
@@ -27,5 +27,5 @@
#define _IPC_KMSG_QUEUE_H_
struct ipc_kmsg_queue {
struct ipc_kmsg *ikmq_base; };
-#endif
+#endif /* _IPC_KMSG_QUEUE_H_ */
diff --git a/ipc/ipc_machdep.h b/ipc/ipc_machdep.h
index e864c4b0..c205ba45 100755
--- a/ipc/ipc_machdep.h
+++ b/ipc/ipc_machdep.h
@@ -24,6 +24,9 @@
* the rights to redistribute these changes.
*/
+#ifndef _IPC_IPC_MACHDEP_H_
+#define _IPC_IPC_MACHDEP_H_
+
/*
* At times, we need to know the size of a port in bits
*/
@@ -38,3 +41,4 @@
#define PORT_T_SIZE_IN_BITS 32
#endif
+#endif /* _IPC_IPC_MACHDEP_H_ */
diff --git a/ipc/ipc_marequest.c b/ipc/ipc_marequest.c
index 06c53eb4..736db838 100644
--- a/ipc/ipc_marequest.c
+++ b/ipc/ipc_marequest.c
@@ -137,7 +137,7 @@ ipc_marequest_init(void)
}
kmem_cache_init(&ipc_marequest_cache, "ipc_marequest",
- sizeof(struct ipc_marequest), 0, NULL, NULL, NULL, 0);
+ sizeof(struct ipc_marequest), 0, NULL, 0);
}
/*
@@ -160,11 +160,11 @@ ipc_marequest_init(void)
*/
mach_msg_return_t
-ipc_marequest_create(space, port, notify, marequestp)
- ipc_space_t space;
- ipc_port_t port;
- mach_port_t notify;
- ipc_marequest_t *marequestp;
+ipc_marequest_create(
+ ipc_space_t space,
+ ipc_port_t port,
+ mach_port_t notify,
+ ipc_marequest_t *marequestp)
{
mach_port_t name;
ipc_entry_t entry;
@@ -256,9 +256,9 @@ ipc_marequest_create(space, port, notify, marequestp)
*/
void
-ipc_marequest_cancel(space, name)
- ipc_space_t space;
- mach_port_t name;
+ipc_marequest_cancel(
+ ipc_space_t space,
+ mach_port_t name)
{
ipc_marequest_bucket_t bucket;
ipc_marequest_t marequest, *last;
@@ -292,9 +292,10 @@ ipc_marequest_cancel(space, name)
*/
void
-ipc_marequest_rename(space, old, new)
- ipc_space_t space;
- mach_port_t old, new;
+ipc_marequest_rename(
+ ipc_space_t space,
+ mach_port_t old,
+ mach_port_t new)
{
ipc_marequest_bucket_t bucket;
ipc_marequest_t marequest, *last;
@@ -336,8 +337,7 @@ ipc_marequest_rename(space, old, new)
*/
void
-ipc_marequest_destroy(marequest)
- ipc_marequest_t marequest;
+ipc_marequest_destroy(ipc_marequest_t marequest)
{
ipc_space_t space = marequest->imar_space;
mach_port_t name;
@@ -404,10 +404,10 @@ ipc_marequest_destroy(marequest)
*/
unsigned int
-ipc_marequest_info(maxp, info, count)
- unsigned int *maxp;
- hash_info_bucket_t *info;
- unsigned int count;
+ipc_marequest_info(
+ unsigned int *maxp,
+ hash_info_bucket_t *info,
+ unsigned int count)
{
ipc_marequest_index_t i;
diff --git a/ipc/ipc_mqueue.c b/ipc/ipc_mqueue.c
index 80a34d3a..9138aec4 100644
--- a/ipc/ipc_mqueue.c
+++ b/ipc/ipc_mqueue.c
@@ -79,9 +79,9 @@ ipc_mqueue_init(
void
ipc_mqueue_move(
- ipc_mqueue_t dest,
- ipc_mqueue_t source,
- ipc_port_t port)
+ ipc_mqueue_t dest,
+ ipc_mqueue_t source,
+ const ipc_port_t port)
{
ipc_kmsg_queue_t oldq, newq;
ipc_thread_queue_t blockedq;
@@ -171,10 +171,10 @@ ipc_mqueue_changed(
*/
mach_msg_return_t
-ipc_mqueue_send(kmsg, option, time_out)
- ipc_kmsg_t kmsg;
- mach_msg_option_t option;
- mach_msg_timeout_t time_out;
+ipc_mqueue_send(
+ ipc_kmsg_t kmsg,
+ mach_msg_option_t option,
+ mach_msg_timeout_t time_out)
{
ipc_port_t port;
diff --git a/ipc/ipc_mqueue.h b/ipc/ipc_mqueue.h
index ef0f9425..2af5e02e 100644
--- a/ipc/ipc_mqueue.h
+++ b/ipc/ipc_mqueue.h
@@ -37,7 +37,8 @@
#include <mach/message.h>
#include <kern/assert.h>
#include <kern/lock.h>
-#include <kern/macro_help.h>
+#include <kern/macros.h>
+#include <ipc/ipc_kmsg_queue.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_thread.h>
diff --git a/ipc/ipc_notify.c b/ipc/ipc_notify.c
index 25fa421b..df5f68bc 100644
--- a/ipc/ipc_notify.c
+++ b/ipc/ipc_notify.c
@@ -59,8 +59,7 @@ mach_dead_name_notification_t ipc_notify_dead_name_template;
*/
void
-ipc_notify_init_port_deleted(n)
- mach_port_deleted_notification_t *n;
+ipc_notify_init_port_deleted(mach_port_deleted_notification_t *n)
{
mach_msg_header_t *m = &n->not_header;
mach_msg_type_t *t = &n->not_type;
@@ -90,8 +89,7 @@ ipc_notify_init_port_deleted(n)
*/
void
-ipc_notify_init_msg_accepted(n)
- mach_msg_accepted_notification_t *n;
+ipc_notify_init_msg_accepted(mach_msg_accepted_notification_t *n)
{
mach_msg_header_t *m = &n->not_header;
mach_msg_type_t *t = &n->not_type;
@@ -121,8 +119,7 @@ ipc_notify_init_msg_accepted(n)
*/
void
-ipc_notify_init_port_destroyed(
- mach_port_destroyed_notification_t *n)
+ipc_notify_init_port_destroyed(mach_port_destroyed_notification_t *n)
{
mach_msg_header_t *m = &n->not_header;
mach_msg_type_t *t = &n->not_type;
@@ -255,9 +252,9 @@ ipc_notify_init(void)
*/
void
-ipc_notify_port_deleted(port, name)
- ipc_port_t port;
- mach_port_t name;
+ipc_notify_port_deleted(
+ ipc_port_t port,
+ mach_port_t name)
{
ipc_kmsg_t kmsg;
mach_port_deleted_notification_t *n;
@@ -289,9 +286,9 @@ ipc_notify_port_deleted(port, name)
*/
void
-ipc_notify_msg_accepted(port, name)
- ipc_port_t port;
- mach_port_t name;
+ipc_notify_msg_accepted(
+ ipc_port_t port,
+ mach_port_t name)
{
ipc_kmsg_t kmsg;
mach_msg_accepted_notification_t *n;
@@ -326,9 +323,9 @@ ipc_notify_msg_accepted(port, name)
*/
void
-ipc_notify_port_destroyed(port, right)
- ipc_port_t port;
- ipc_port_t right;
+ipc_notify_port_destroyed(
+ ipc_port_t port,
+ ipc_port_t right)
{
ipc_kmsg_t kmsg;
mach_port_destroyed_notification_t *n;
@@ -362,9 +359,9 @@ ipc_notify_port_destroyed(port, right)
*/
void
-ipc_notify_no_senders(port, mscount)
- ipc_port_t port;
- mach_port_mscount_t mscount;
+ipc_notify_no_senders(
+ ipc_port_t port,
+ mach_port_mscount_t mscount)
{
ipc_kmsg_t kmsg;
mach_no_senders_notification_t *n;
@@ -396,8 +393,7 @@ ipc_notify_no_senders(port, mscount)
*/
void
-ipc_notify_send_once(port)
- ipc_port_t port;
+ipc_notify_send_once(ipc_port_t port)
{
ipc_kmsg_t kmsg;
mach_send_once_notification_t *n;
@@ -428,9 +424,9 @@ ipc_notify_send_once(port)
*/
void
-ipc_notify_dead_name(port, name)
- ipc_port_t port;
- mach_port_t name;
+ipc_notify_dead_name(
+ ipc_port_t port,
+ mach_port_t name)
{
ipc_kmsg_t kmsg;
mach_dead_name_notification_t *n;
diff --git a/ipc/ipc_object.c b/ipc/ipc_object.c
index b8cae8f5..a6457c37 100644
--- a/ipc/ipc_object.c
+++ b/ipc/ipc_object.c
@@ -41,7 +41,6 @@
#include <ipc/ipc_space.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_object.h>
-#include <ipc/ipc_hash.h>
#include <ipc/ipc_right.h>
#include <ipc/ipc_notify.h>
#include <ipc/ipc_pset.h>
@@ -156,11 +155,12 @@ ipc_object_alloc_dead(
ipc_entry_t entry;
kern_return_t kr;
-
+ is_write_lock(space);
kr = ipc_entry_alloc(space, namep, &entry);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
+ is_write_unlock(space);
return kr;
- /* space is write-locked */
+ }
/* null object, MACH_PORT_TYPE_DEAD_NAME, 1 uref */
@@ -192,11 +192,12 @@ ipc_object_alloc_dead_name(
ipc_entry_t entry;
kern_return_t kr;
-
+ is_write_lock(space);
kr = ipc_entry_alloc_name(space, name, &entry);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
+ is_write_unlock(space);
return kr;
- /* space is write-locked */
+ }
if (ipc_right_inuse(space, name, entry))
return KERN_NAME_EXISTS;
@@ -255,12 +256,13 @@ ipc_object_alloc(
memset(pset, 0, sizeof(*pset));
}
+ is_write_lock(space);
kr = ipc_entry_alloc(space, namep, &entry);
if (kr != KERN_SUCCESS) {
+ is_write_unlock(space);
io_free(otype, object);
return kr;
}
- /* space is write-locked */
entry->ie_bits |= type | urefs;
entry->ie_object = object;
@@ -322,12 +324,13 @@ ipc_object_alloc_name(
memset(pset, 0, sizeof(*pset));
}
+ is_write_lock(space);
kr = ipc_entry_alloc_name(space, name, &entry);
if (kr != KERN_SUCCESS) {
+ is_write_unlock(space);
io_free(otype, object);
return kr;
}
- /* space is write-locked */
if (ipc_right_inuse(space, name, entry)) {
io_free(otype, object);
@@ -481,6 +484,7 @@ ipc_object_copyin_from_kernel(
port->ip_receiver_name = MACH_PORT_NULL;
port->ip_destination = IP_NULL;
+ ipc_port_flag_protected_payload_clear(port);
ip_unlock(port);
break;
}
@@ -629,15 +633,10 @@ ipc_object_copyout(
break;
}
- kr = ipc_entry_get(space, &name, &entry);
+ kr = ipc_entry_alloc(space, &name, &entry);
if (kr != KERN_SUCCESS) {
- /* unlocks/locks space, so must start again */
-
- kr = ipc_entry_grow_table(space);
- if (kr != KERN_SUCCESS)
- return kr; /* space is unlocked */
-
- continue;
+ is_write_unlock(space);
+ return kr;
}
assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE);
@@ -690,15 +689,10 @@ ipc_object_copyout_multiname(space, object, namep)
return KERN_INVALID_TASK;
}
- kr = ipc_entry_get(space, &name, &entry);
+ kr = ipc_entry_alloc(space, &name, &entry);
if (kr != KERN_SUCCESS) {
- /* unlocks/locks space, so must start again */
-
- kr = ipc_entry_grow_table(space);
- if (kr != KERN_SUCCESS)
- return kr; /* space is unlocked */
-
- continue;
+ is_write_unlock(space);
+ return kr; /* space is unlocked */
}
assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE);
@@ -763,10 +757,12 @@ ipc_object_copyout_name(
assert(IO_VALID(object));
assert(io_otype(object) == IOT_PORT);
+ is_write_lock(space);
kr = ipc_entry_alloc_name(space, name, &entry);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
+ is_write_unlock(space);
return kr;
- /* space is write-locked and active */
+ }
if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
ipc_right_reverse(space, object, &oname, &oentry)) {
@@ -940,10 +936,12 @@ ipc_object_rename(
ipc_entry_t oentry, nentry;
kern_return_t kr;
+ is_write_lock(space);
kr = ipc_entry_alloc_name(space, nname, &nentry);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
+ is_write_unlock(space);
return kr;
- /* space is write-locked and active */
+ }
if (ipc_right_inuse(space, nname, nentry)) {
/* space is unlocked */
@@ -1000,14 +998,15 @@ char *ikot_print_array[IKOT_MAX_TYPE] = {
"(SEMAPHORE) ",
"(LOCK_SET) ",
"(CLOCK) ",
- "(CLOCK_CTRL) ", /* 26 */
+ "(CLOCK_CTRL) ",
+ "(PAGER_PROXY) ", /* 27 */
/* << new entries here */
"(UNKNOWN) " /* magic catchall */
}; /* Please keep in sync with kern/ipc_kobject.h */
void
ipc_object_print(
- ipc_object_t object)
+ const ipc_object_t object)
{
int kotype;
diff --git a/ipc/ipc_object.h b/ipc/ipc_object.h
index adf5bca4..be5bea71 100644
--- a/ipc/ipc_object.h
+++ b/ipc/ipc_object.h
@@ -38,7 +38,7 @@
#include <mach/message.h>
#include <ipc/ipc_types.h>
#include <kern/lock.h>
-#include <kern/macro_help.h>
+#include <kern/macros.h>
#include <kern/slab.h>
typedef unsigned int ipc_object_refs_t;
@@ -57,7 +57,9 @@ typedef struct ipc_object {
#define IO_VALID(io) (((io) != IO_NULL) && ((io) != IO_DEAD))
#define IO_BITS_KOTYPE 0x0000ffff /* used by the object */
-#define IO_BITS_OTYPE 0x7fff0000 /* determines a cache */
+#define IO_BITS_OTYPE 0x3fff0000 /* determines a cache */
+/* The following masks are used to store attributes of ipc ports. */
+#define IO_BITS_PROTECTED_PAYLOAD 0x40000000 /* pp set? */
#define IO_BITS_ACTIVE 0x80000000U /* is object alive? */
#define io_active(io) ((int)(io)->io_bits < 0) /* hack */
diff --git a/ipc/ipc_port.c b/ipc/ipc_port.c
index b9607395..86a4ee2a 100644
--- a/ipc/ipc_port.c
+++ b/ipc/ipc_port.c
@@ -94,11 +94,11 @@ ipc_port_timestamp(void)
*/
kern_return_t
-ipc_port_dnrequest(port, name, soright, indexp)
- ipc_port_t port;
- mach_port_t name;
- ipc_port_t soright;
- ipc_port_request_index_t *indexp;
+ipc_port_dnrequest(
+ ipc_port_t port,
+ mach_port_t name,
+ ipc_port_t soright,
+ ipc_port_request_index_t *indexp)
{
ipc_port_request_t ipr, table;
ipc_port_request_index_t index;
@@ -142,8 +142,7 @@ ipc_port_dnrequest(port, name, soright, indexp)
*/
kern_return_t
-ipc_port_dngrow(port)
- ipc_port_t port;
+ipc_port_dngrow(ipc_port_t port)
{
ipc_table_size_t its;
ipc_port_request_t otable, ntable;
@@ -275,9 +274,9 @@ ipc_port_dncancel(
void
ipc_port_pdrequest(
- ipc_port_t port,
- ipc_port_t notify,
- ipc_port_t *previousp)
+ ipc_port_t port,
+ const ipc_port_t notify,
+ ipc_port_t *previousp)
{
ipc_port_t previous;
@@ -382,8 +381,7 @@ ipc_port_set_qlimit(
*/
ipc_mqueue_t
-ipc_port_lock_mqueue(port)
- ipc_port_t port;
+ipc_port_lock_mqueue(ipc_port_t port)
{
if (port->ip_pset != IPS_NULL) {
ipc_pset_t pset = port->ip_pset;
@@ -413,9 +411,9 @@ ipc_port_lock_mqueue(port)
*/
void
-ipc_port_set_seqno(port, seqno)
- ipc_port_t port;
- mach_port_seqno_t seqno;
+ipc_port_set_seqno(
+ ipc_port_t port,
+ mach_port_seqno_t seqno)
{
ipc_mqueue_t mqueue;
@@ -425,6 +423,44 @@ ipc_port_set_seqno(port, seqno)
}
/*
+ * Routine: ipc_port_set_protected_payload
+ * Purpose:
+ * Changes a port's protected payload.
+ * Conditions:
+ * The port is locked and active.
+ */
+
+void
+ipc_port_set_protected_payload(ipc_port_t port, unsigned long payload)
+{
+ ipc_mqueue_t mqueue;
+
+ mqueue = ipc_port_lock_mqueue(port);
+ port->ip_protected_payload = payload;
+ ipc_port_flag_protected_payload_set(port);
+ imq_unlock(mqueue);
+}
+
+/*
+ * Routine: ipc_port_clear_protected_payload
+ * Purpose:
+ * Clear a port's protected payload.
+ * Conditions:
+ * The port is locked and active.
+ */
+
+void
+ipc_port_clear_protected_payload(ipc_port_t port)
+{
+ ipc_mqueue_t mqueue;
+
+ mqueue = ipc_port_lock_mqueue(port);
+ ipc_port_flag_protected_payload_clear(port);
+ imq_unlock(mqueue);
+}
+
+
+/*
* Routine: ipc_port_clear_receiver
* Purpose:
* Prepares a receive right for transmission/destruction.
@@ -493,6 +529,8 @@ ipc_port_init(
port->ip_seqno = 0;
port->ip_msgcount = 0;
port->ip_qlimit = MACH_PORT_QLIMIT_DEFAULT;
+ ipc_port_flag_protected_payload_clear(port);
+ port->ip_protected_payload = 0;
ipc_mqueue_init(&port->ip_messages);
ipc_thread_queue_init(&port->ip_blocked);
@@ -615,6 +653,7 @@ ipc_port_destroy(
/* make port be in limbo */
port->ip_receiver_name = MACH_PORT_NULL;
port->ip_destination = IP_NULL;
+ ipc_port_flag_protected_payload_clear(port);
ip_unlock(port);
if (!ipc_port_check_circularity(port, pdrequest)) {
@@ -1135,16 +1174,15 @@ ipc_port_release_receive(
*/
ipc_port_t
-ipc_port_alloc_special(space)
- ipc_space_t space;
+ipc_port_alloc_special(ipc_space_t space)
{
ipc_port_t port;
- port = (ipc_port_t) io_alloc(IOT_PORT);
+ port = ip_alloc();
if (port == IP_NULL)
return IP_NULL;
- io_lock_init(&port->ip_object);
+ ip_lock_init(port);
port->ip_references = 1;
port->ip_object.io_bits = io_makebits(TRUE, IOT_PORT, 0);
@@ -1212,12 +1250,17 @@ ipc_port_dealloc_special(
void
ipc_port_print(port)
- ipc_port_t port;
+ const ipc_port_t port;
{
printf("port 0x%x\n", port);
indent += 2;
+ iprintf("flags ");
+ printf("has_protected_payload=%d",
+ ipc_port_flag_protected_payload(port));
+ printf("\n");
+
ipc_object_print(&port->ip_object);
iprintf("receiver=0x%x", port->ip_receiver);
printf(", receiver_name=0x%x\n", port->ip_receiver_name);
@@ -1240,7 +1283,9 @@ ipc_port_print(port)
printf(", sndrs=0x%x", port->ip_blocked.ithq_base);
printf(", kobj=0x%x\n", port->ip_kobject);
- indent -=2;
+ iprintf("protected_payload=%p\n", (void *) port->ip_protected_payload);
+
+ indent -= 2;
}
#endif /* MACH_KDB */
diff --git a/ipc/ipc_port.h b/ipc/ipc_port.h
index 27d2e496..ade69679 100644
--- a/ipc/ipc_port.h
+++ b/ipc/ipc_port.h
@@ -43,11 +43,12 @@
#include <mach/kern_return.h>
#include <mach/port.h>
#include <kern/lock.h>
-#include <kern/macro_help.h>
+#include <kern/macros.h>
#include <kern/ipc_kobject.h>
#include <ipc/ipc_mqueue.h>
#include <ipc/ipc_table.h>
#include <ipc/ipc_thread.h>
+#include <ipc/ipc_object.h>
#include "ipc_target.h"
#include <mach/rpc.h>
@@ -96,6 +97,7 @@ struct ipc_port {
mach_port_msgcount_t ip_msgcount;
mach_port_msgcount_t ip_qlimit;
struct ipc_thread_queue ip_blocked;
+ unsigned long ip_protected_payload;
};
#define ip_object ip_target.ipt_object
@@ -262,6 +264,12 @@ extern void
ipc_port_set_seqno(ipc_port_t, mach_port_seqno_t);
extern void
+ipc_port_set_protected_payload(ipc_port_t, unsigned long);
+
+extern void
+ipc_port_clear_protected_payload(ipc_port_t);
+
+extern void
ipc_port_clear_receiver(ipc_port_t);
extern void
@@ -325,4 +333,23 @@ ipc_port_dealloc_special(ipc_port_t, ipc_space_t);
#define ipc_port_release(port) \
ipc_object_release(&(port)->ip_object)
+static inline boolean_t
+ipc_port_flag_protected_payload(const struct ipc_port *port)
+{
+ return !! (port->ip_target.ipt_object.io_bits
+ & IO_BITS_PROTECTED_PAYLOAD);
+}
+
+static inline void
+ipc_port_flag_protected_payload_set(struct ipc_port *port)
+{
+ port->ip_target.ipt_object.io_bits |= IO_BITS_PROTECTED_PAYLOAD;
+}
+
+static inline void
+ipc_port_flag_protected_payload_clear(struct ipc_port *port)
+{
+ port->ip_target.ipt_object.io_bits &= ~IO_BITS_PROTECTED_PAYLOAD;
+}
+
#endif /* _IPC_IPC_PORT_H_ */
diff --git a/ipc/ipc_print.h b/ipc/ipc_print.h
index ef676a77..5e8e4f34 100644
--- a/ipc/ipc_print.h
+++ b/ipc/ipc_print.h
@@ -1,3 +1,21 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
#ifndef _IPC_PRINT_H_
#define _IPC_PRINT_H_
@@ -6,12 +24,13 @@
#include <mach/mach_types.h>
#include <mach/message.h>
#include <ipc/ipc_types.h>
+#include <ipc/ipc_pset.h>
-extern void ipc_port_print(ipc_port_t);
+extern void ipc_port_print(const ipc_port_t);
-extern void ipc_pset_print(ipc_pset_t);
+extern void ipc_pset_print(const ipc_pset_t);
-extern void ipc_kmsg_print(ipc_kmsg_t);
+extern void ipc_kmsg_print(const ipc_kmsg_t);
extern void ipc_msg_print(mach_msg_header_t*);
diff --git a/ipc/ipc_pset.c b/ipc/ipc_pset.c
index c016d276..884e8972 100644
--- a/ipc/ipc_pset.c
+++ b/ipc/ipc_pset.c
@@ -334,7 +334,7 @@ ipc_pset_destroy(
void
ipc_pset_print(
- ipc_pset_t pset)
+ const ipc_pset_t pset)
{
printf("pset 0x%x\n", pset);
@@ -345,7 +345,7 @@ ipc_pset_print(
iprintf("kmsgs = 0x%x", pset->ips_messages.imq_messages.ikmq_base);
printf(",rcvrs = 0x%x\n", pset->ips_messages.imq_threads.ithq_base);
- indent -=2;
+ indent -= 2;
}
#endif /* MACH_KDB */
diff --git a/ipc/ipc_right.c b/ipc/ipc_right.c
index 41fe3de1..773b3b10 100644
--- a/ipc/ipc_right.c
+++ b/ipc/ipc_right.c
@@ -43,7 +43,6 @@
#include <ipc/ipc_entry.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_object.h>
-#include <ipc/ipc_hash.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_pset.h>
#include <ipc/ipc_marequest.h>
@@ -142,7 +141,8 @@ ipc_right_reverse(
return TRUE;
}
- if (ipc_hash_lookup(space, (ipc_object_t) port, namep, entryp)) {
+ if ((*entryp = ipc_reverse_lookup(space, (ipc_object_t) port))) {
+ *namep = (*entryp)->ie_name;
assert((entry = *entryp) != IE_NULL);
assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND);
assert(port == (ipc_port_t) entry->ie_object);
@@ -331,10 +331,10 @@ ipc_right_dncancel(
*/
boolean_t
-ipc_right_inuse(space, name, entry)
- ipc_space_t space;
- mach_port_t name;
- ipc_entry_t entry;
+ipc_right_inuse(
+ ipc_space_t space,
+ mach_port_t name,
+ ipc_entry_t entry)
{
ipc_entry_bits_t bits = entry->ie_bits;
@@ -359,11 +359,11 @@ ipc_right_inuse(space, name, entry)
*/
boolean_t
-ipc_right_check(space, port, name, entry)
- ipc_space_t space;
- ipc_port_t port;
- mach_port_t name;
- ipc_entry_t entry;
+ipc_right_check(
+ ipc_space_t space,
+ ipc_port_t port,
+ mach_port_t name,
+ ipc_entry_t entry)
{
ipc_entry_bits_t bits;
@@ -392,7 +392,7 @@ ipc_right_check(space, port, name, entry)
ipc_marequest_cancel(space, name);
}
- ipc_hash_delete(space, (ipc_object_t) port, name, entry);
+ ipc_reverse_remove(space, (ipc_object_t) port);
} else {
assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
assert(IE_BITS_UREFS(bits) == 1);
@@ -423,7 +423,7 @@ ipc_right_check(space, port, name, entry)
* Purpose:
* Cleans up an entry in a dead space.
* The entry isn't deallocated or removed
- * from reverse hash tables.
+ * from the reverse mappings.
* Conditions:
* The space is dead and unlocked.
*/
@@ -609,8 +609,7 @@ ipc_right_destroy(
}
if (type == MACH_PORT_TYPE_SEND)
- ipc_hash_delete(space, (ipc_object_t) port,
- name, entry);
+ ipc_reverse_remove(space, (ipc_object_t) port);
ip_lock(port);
@@ -697,10 +696,10 @@ ipc_right_destroy(
*/
kern_return_t
-ipc_right_dealloc(space, name, entry)
- ipc_space_t space;
- mach_port_t name;
- ipc_entry_t entry;
+ipc_right_dealloc(
+ ipc_space_t space,
+ mach_port_t name,
+ ipc_entry_t entry)
{
ipc_entry_bits_t bits = entry->ie_bits;
mach_port_type_t type = IE_BITS_TYPE(bits);
@@ -789,8 +788,7 @@ ipc_right_dealloc(space, name, entry)
dnrequest = ipc_right_dncancel_macro(space, port,
name, entry);
- ipc_hash_delete(space, (ipc_object_t) port,
- name, entry);
+ ipc_reverse_remove(space, (ipc_object_t) port);
if (bits & IE_BITS_MAREQUEST)
ipc_marequest_cancel(space, name);
@@ -874,12 +872,12 @@ ipc_right_dealloc(space, name, entry)
*/
kern_return_t
-ipc_right_delta(space, name, entry, right, delta)
- ipc_space_t space;
- mach_port_t name;
- ipc_entry_t entry;
- mach_port_right_t right;
- mach_port_delta_t delta;
+ipc_right_delta(
+ ipc_space_t space,
+ mach_port_t name,
+ ipc_entry_t entry,
+ mach_port_right_t right,
+ mach_port_delta_t delta)
{
ipc_entry_bits_t bits = entry->ie_bits;
@@ -1134,8 +1132,7 @@ ipc_right_delta(space, name, entry, right, delta)
dnrequest = ipc_right_dncancel_macro(
space, port, name, entry);
- ipc_hash_delete(space, (ipc_object_t) port,
- name, entry);
+ ipc_reverse_remove(space, (ipc_object_t) port);
if (bits & IE_BITS_MAREQUEST)
ipc_marequest_cancel(space, name);
@@ -1410,8 +1407,8 @@ ipc_right_copyin(
assert(IE_BITS_UREFS(bits) > 0);
assert(port->ip_srights > 0);
- ipc_hash_insert(space, (ipc_object_t) port,
- name, entry);
+ entry->ie_name = name;
+ ipc_reverse_insert(space, (ipc_object_t) port, entry);
ip_reference(port);
} else {
@@ -1432,6 +1429,12 @@ ipc_right_copyin(
port->ip_receiver_name = MACH_PORT_NULL;
port->ip_destination = IP_NULL;
+
+ /*
+ * Clear the protected payload field to retain
+ * the behavior of mach_msg.
+ */
+ ipc_port_flag_protected_payload_clear(port);
ip_unlock(port);
*objectp = (ipc_object_t) port;
@@ -1528,8 +1531,7 @@ ipc_right_copyin(
dnrequest = ipc_right_dncancel_macro(
space, port, name, entry);
- ipc_hash_delete(space, (ipc_object_t) port,
- name, entry);
+ ipc_reverse_remove(space, (ipc_object_t) port);
if (bits & IE_BITS_MAREQUEST)
ipc_marequest_cancel(space, name);
@@ -1790,8 +1792,7 @@ ipc_right_copyin_two(
dnrequest = ipc_right_dncancel_macro(space, port,
name, entry);
- ipc_hash_delete(space, (ipc_object_t) port,
- name, entry);
+ ipc_reverse_remove(space, (ipc_object_t) port);
if (bits & IE_BITS_MAREQUEST)
ipc_marequest_cancel(space, name);
@@ -1915,8 +1916,8 @@ ipc_right_copyout(
/* entry is locked holding ref, so can use port */
- ipc_hash_insert(space, (ipc_object_t) port,
- name, entry);
+ entry->ie_name = name;
+ ipc_reverse_insert(space, (ipc_object_t) port, entry);
}
entry->ie_bits = (bits | MACH_PORT_TYPE_SEND) + 1;
@@ -1932,6 +1933,12 @@ ipc_right_copyout(
port->ip_receiver_name = name;
port->ip_receiver = space;
+ /*
+ * Clear the protected payload field to retain
+ * the behavior of mach_msg.
+ */
+ ipc_port_flag_protected_payload_clear(port);
+
assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
if (bits & MACH_PORT_TYPE_SEND) {
@@ -1944,8 +1951,7 @@ ipc_right_copyout(
/* entry is locked holding ref, so can use port */
- ipc_hash_delete(space, (ipc_object_t) port,
- name, entry);
+ ipc_reverse_remove(space, (ipc_object_t) port);
} else {
assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
assert(IE_BITS_UREFS(bits) == 0);
@@ -2071,7 +2077,7 @@ ipc_right_rename(
ipc_marequest_rename(space, oname, nname);
}
- /* initialize nentry before letting ipc_hash_insert see it */
+ /* initialize nentry before letting ipc_reverse_insert see it */
assert((nentry->ie_bits & IE_BITS_RIGHT_MASK) == 0);
nentry->ie_bits |= bits & IE_BITS_RIGHT_MASK;
@@ -2085,8 +2091,9 @@ ipc_right_rename(
port = (ipc_port_t) object;
assert(port != IP_NULL);
- ipc_hash_delete(space, (ipc_object_t) port, oname, oentry);
- ipc_hash_insert(space, (ipc_object_t) port, nname, nentry);
+ ipc_reverse_remove(space, (ipc_object_t) port);
+ nentry->ie_name = nname;
+ ipc_reverse_insert(space, (ipc_object_t) port, nentry);
break;
}
diff --git a/ipc/ipc_space.c b/ipc/ipc_space.c
index ab55e838..894cf58e 100644
--- a/ipc/ipc_space.c
+++ b/ipc/ipc_space.c
@@ -46,8 +46,6 @@
#include <kern/slab.h>
#include <ipc/port.h>
#include <ipc/ipc_entry.h>
-#include <ipc/ipc_splay.h>
-#include <ipc/ipc_hash.h>
#include <ipc/ipc_table.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
@@ -82,6 +80,9 @@ ipc_space_release(
ipc_space_release_macro(space);
}
+/* A place-holder object for the zeroth entry. */
+struct ipc_entry zero_entry;
+
/*
* Routine: ipc_space_create
* Purpose:
@@ -98,56 +99,27 @@ ipc_space_release(
kern_return_t
ipc_space_create(
- ipc_table_size_t initial,
ipc_space_t *spacep)
{
ipc_space_t space;
- ipc_entry_t table;
- ipc_entry_num_t new_size;
- mach_port_index_t index;
space = is_alloc();
if (space == IS_NULL)
return KERN_RESOURCE_SHORTAGE;
- table = it_entries_alloc(initial);
- if (table == IE_NULL) {
- is_free(space);
- return KERN_RESOURCE_SHORTAGE;
- }
-
- new_size = initial->its_size;
- memset((void *) table, 0, new_size * sizeof(struct ipc_entry));
-
- /*
- * Initialize the free list in the table.
- * Add the entries in reverse order, and
- * set the generation number to -1, so that
- * initial allocations produce "natural" names.
- */
-
- for (index = 0; index < new_size; index++) {
- ipc_entry_t entry = &table[index];
-
- entry->ie_bits = IE_BITS_GEN_MASK;
- entry->ie_next = index+1;
- }
- table[new_size-1].ie_next = 0;
-
is_ref_lock_init(space);
space->is_references = 2;
is_lock_init(space);
space->is_active = TRUE;
- space->is_growing = FALSE;
- space->is_table = table;
- space->is_table_size = new_size;
- space->is_table_next = initial+1;
- ipc_splay_tree_init(&space->is_tree);
- space->is_tree_total = 0;
- space->is_tree_small = 0;
- space->is_tree_hash = 0;
+ rdxtree_init(&space->is_map);
+ rdxtree_init(&space->is_reverse_map);
+ /* The zeroth entry is reserved. */
+ rdxtree_insert(&space->is_map, 0, &zero_entry);
+ space->is_size = 1;
+ space->is_free_list = NULL;
+ space->is_free_list_size = 0;
*spacep = space;
return KERN_SUCCESS;
@@ -201,10 +173,6 @@ void
ipc_space_destroy(
ipc_space_t space)
{
- ipc_tree_entry_t tentry;
- ipc_entry_t table;
- ipc_entry_num_t size;
- mach_port_index_t index;
boolean_t active;
assert(space != IS_NULL);
@@ -217,59 +185,25 @@ ipc_space_destroy(
if (!active)
return;
- /*
- * If somebody is trying to grow the table,
- * we must wait until they finish and figure
- * out the space died.
- */
+ ipc_entry_t entry;
+ struct rdxtree_iter iter;
+ rdxtree_for_each(&space->is_map, &iter, entry) {
+ if (entry->ie_name == MACH_PORT_NULL)
+ continue;
- is_read_lock(space);
- while (space->is_growing) {
- assert_wait((event_t) space, FALSE);
- is_read_unlock(space);
- thread_block((void (*)(void)) 0);
- is_read_lock(space);
- }
- is_read_unlock(space);
-
- /*
- * Now we can futz with it without having it locked.
- */
-
- table = space->is_table;
- size = space->is_table_size;
-
- for (index = 0; index < size; index++) {
- ipc_entry_t entry = &table[index];
mach_port_type_t type = IE_BITS_TYPE(entry->ie_bits);
if (type != MACH_PORT_TYPE_NONE) {
mach_port_t name =
- MACH_PORT_MAKEB(index, entry->ie_bits);
+ MACH_PORT_MAKEB(entry->ie_name, entry->ie_bits);
ipc_right_clean(space, name, entry);
}
- }
-
- it_entries_free(space->is_table_next-1, table);
-
- for (tentry = ipc_splay_traverse_start(&space->is_tree);
- tentry != ITE_NULL;
- tentry = ipc_splay_traverse_next(&space->is_tree, TRUE)) {
- mach_port_type_t type = IE_BITS_TYPE(tentry->ite_bits);
- mach_port_t name = tentry->ite_name;
-
- assert(type != MACH_PORT_TYPE_NONE);
-
- /* use object before ipc_right_clean releases ref */
-
- if (type == MACH_PORT_TYPE_SEND)
- ipc_hash_global_delete(space, tentry->ite_object,
- name, tentry);
- ipc_right_clean(space, name, &tentry->ite_entry);
+ ie_free(entry);
}
- ipc_splay_traverse_finish(&space->is_tree);
+ rdxtree_remove_all(&space->is_map);
+ rdxtree_remove_all(&space->is_reverse_map);
/*
* Because the space is now dead,
diff --git a/ipc/ipc_space.h b/ipc/ipc_space.h
index c4683d20..a2aac40a 100644
--- a/ipc/ipc_space.h
+++ b/ipc/ipc_space.h
@@ -42,25 +42,18 @@
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/mach_types.h>
-#include <kern/macro_help.h>
+#include <machine/vm_param.h>
+#include <kern/macros.h>
#include <kern/lock.h>
+#include <kern/rdxtree.h>
#include <kern/slab.h>
-#include <ipc/ipc_splay.h>
+#include <ipc/ipc_entry.h>
#include <ipc/ipc_types.h>
/*
* Every task has a space of IPC capabilities.
* IPC operations like send and receive use this space.
* IPC kernel calls manipulate the space of the target task.
- *
- * Every space has a non-NULL is_table with is_table_size entries.
- * A space may have a NULL is_tree. is_tree_small records the
- * number of entries in the tree that, if the table were to grow
- * to the next larger size, would move from the tree to the table.
- *
- * is_growing marks when the table is in the process of growing.
- * When the table is growing, it can't be freed or grown by another
- * thread, because of krealloc/kmem_realloc's requirements.
*/
typedef unsigned int ipc_space_refs_t;
@@ -69,18 +62,18 @@ struct ipc_space {
decl_simple_lock_data(,is_ref_lock_data)
ipc_space_refs_t is_references;
- decl_simple_lock_data(,is_lock_data)
+ struct lock is_lock_data;
boolean_t is_active; /* is the space alive? */
- boolean_t is_growing; /* is the space growing? */
- ipc_entry_t is_table; /* an array of entries */
- ipc_entry_num_t is_table_size; /* current size of table */
- struct ipc_table_size *is_table_next; /* info for larger table */
- struct ipc_splay_tree is_tree; /* a splay tree of entries */
- ipc_entry_num_t is_tree_total; /* number of entries in the tree */
- ipc_entry_num_t is_tree_small; /* # of small entries in the tree */
- ipc_entry_num_t is_tree_hash; /* # of hashed entries in the tree */
+ struct rdxtree is_map; /* a map of entries */
+ size_t is_size; /* number of entries */
+ struct rdxtree is_reverse_map; /* maps objects to entries */
+ ipc_entry_t is_free_list; /* a linked list of free entries */
+ size_t is_free_list_size; /* number of free entries */
+#define IS_FREE_LIST_SIZE_LIMIT 64 /* maximum number of entries
+ in the free list */
};
+
#define IS_NULL ((ipc_space_t) 0)
extern struct kmem_cache ipc_space_cache;
@@ -114,25 +107,204 @@ MACRO_BEGIN \
is_free(is); \
MACRO_END
-#define is_lock_init(is) simple_lock_init(&(is)->is_lock_data)
+#define is_lock_init(is) lock_init(&(is)->is_lock_data, TRUE)
-#define is_read_lock(is) simple_lock(&(is)->is_lock_data)
-#define is_read_unlock(is) simple_unlock(&(is)->is_lock_data)
+#define is_read_lock(is) lock_read(&(is)->is_lock_data)
+#define is_read_unlock(is) lock_done(&(is)->is_lock_data)
-#define is_write_lock(is) simple_lock(&(is)->is_lock_data)
-#define is_write_lock_try(is) simple_lock_try(&(is)->is_lock_data)
-#define is_write_unlock(is) simple_unlock(&(is)->is_lock_data)
+#define is_write_lock(is) lock_write(&(is)->is_lock_data)
+#define is_write_lock_try(is) lock_try_write(&(is)->is_lock_data)
+#define is_write_unlock(is) lock_done(&(is)->is_lock_data)
-#define is_write_to_read_lock(is)
+#define is_write_to_read_lock(is) lock_write_to_read(&(is)->is_lock_data)
extern void ipc_space_reference(struct ipc_space *space);
extern void ipc_space_release(struct ipc_space *space);
-#define is_reference(is) ipc_space_reference(is)
-#define is_release(is) ipc_space_release(is)
+#define is_reference(is) ipc_space_reference_macro(is)
+#define is_release(is) ipc_space_release_macro(is)
-kern_return_t ipc_space_create(ipc_table_size_t, ipc_space_t *);
+kern_return_t ipc_space_create(ipc_space_t *);
kern_return_t ipc_space_create_special(struct ipc_space **);
void ipc_space_destroy(struct ipc_space *);
+/* IPC entry lookups. */
+
+/*
+ * Routine: ipc_entry_lookup
+ * Purpose:
+ * Searches for an entry, given its name.
+ * Conditions:
+ * The space must be read or write locked throughout.
+ * The space must be active.
+ */
+
+static inline ipc_entry_t
+ipc_entry_lookup(
+ ipc_space_t space,
+ mach_port_t name)
+{
+ ipc_entry_t entry;
+
+ assert(space->is_active);
+ entry = rdxtree_lookup(&space->is_map, (rdxtree_key_t) name);
+ if (entry != IE_NULL
+ && IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
+ entry = NULL;
+ assert((entry == IE_NULL) || IE_BITS_TYPE(entry->ie_bits));
+ return entry;
+}
+
+/*
+ * Routine: ipc_entry_get
+ * Purpose:
+ * Tries to allocate an entry out of the space.
+ * Conditions:
+ * The space is write-locked and active throughout.
+ * An object may be locked. Will not allocate memory.
+ * Returns:
+ * KERN_SUCCESS A free entry was found.
+ * KERN_NO_SPACE No entry allocated.
+ */
+
+static inline kern_return_t
+ipc_entry_get(
+ ipc_space_t space,
+ mach_port_t *namep,
+ ipc_entry_t *entryp)
+{
+ mach_port_t new_name;
+ ipc_entry_t free_entry;
+
+ assert(space->is_active);
+
+ /* Get entry from the free list. */
+ free_entry = space->is_free_list;
+ if (free_entry == IE_NULL)
+ return KERN_NO_SPACE;
+
+ space->is_free_list = free_entry->ie_next_free;
+ space->is_free_list_size -= 1;
+
+ /*
+ * Initialize the new entry. We need only
+ * increment the generation number and clear ie_request.
+ */
+
+ {
+ mach_port_gen_t gen;
+
+ assert((free_entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
+ gen = free_entry->ie_bits + IE_BITS_GEN_ONE;
+ free_entry->ie_bits = gen;
+ free_entry->ie_request = 0;
+ new_name = MACH_PORT_MAKE(free_entry->ie_name, gen);
+ }
+
+ /*
+ * The new name can't be MACH_PORT_NULL because index
+ * is non-zero. It can't be MACH_PORT_DEAD because
+ * the table isn't allowed to grow big enough.
+ * (See comment in ipc/ipc_table.h.)
+ */
+
+ assert(MACH_PORT_VALID(new_name));
+ assert(free_entry->ie_object == IO_NULL);
+
+ space->is_size += 1;
+ *namep = new_name;
+ *entryp = free_entry;
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: ipc_entry_dealloc
+ * Purpose:
+ * Deallocates an entry from a space.
+ * Conditions:
+ * The space must be write-locked throughout.
+ * The space must be active.
+ */
+
+static inline void
+ipc_entry_dealloc(
+ ipc_space_t space,
+ mach_port_t name,
+ ipc_entry_t entry)
+{
+ assert(space->is_active);
+ assert(entry->ie_object == IO_NULL);
+ assert(entry->ie_request == 0);
+
+ if (space->is_free_list_size < IS_FREE_LIST_SIZE_LIMIT) {
+ space->is_free_list_size += 1;
+ entry->ie_bits &= IE_BITS_GEN_MASK;
+ entry->ie_next_free = space->is_free_list;
+ space->is_free_list = entry;
+ } else {
+ rdxtree_remove(&space->is_map, (rdxtree_key_t) name);
+ ie_free(entry);
+ }
+ space->is_size -= 1;
+}
+
+/* Reverse lookups. */
+
+/* Cast a pointer to a suitable key. */
+#define KEY(X) \
+ ({ \
+ assert((((unsigned long) (X)) & 0x07) == 0); \
+ ((unsigned long long) \
+ (((unsigned long) (X) - VM_MIN_KERNEL_ADDRESS) >> 3)); \
+ })
+
+/* Insert (OBJ, ENTRY) pair into the reverse mapping. SPACE must
+ be write-locked. */
+static inline kern_return_t
+ipc_reverse_insert(ipc_space_t space,
+ ipc_object_t obj,
+ ipc_entry_t entry)
+{
+ assert(space != IS_NULL);
+ assert(obj != IO_NULL);
+ return (kern_return_t) rdxtree_insert(&space->is_reverse_map,
+ KEY(obj), entry);
+}
+
+/* Remove OBJ from the reverse mapping. SPACE must be
+ write-locked. */
+static inline ipc_entry_t
+ipc_reverse_remove(ipc_space_t space,
+ ipc_object_t obj)
+{
+ assert(space != IS_NULL);
+ assert(obj != IO_NULL);
+ return rdxtree_remove(&space->is_reverse_map, KEY(obj));
+}
+
+/* Remove all entries from the reverse mapping. SPACE must be
+ write-locked. */
+static inline void
+ipc_reverse_remove_all(ipc_space_t space)
+{
+ assert(space != IS_NULL);
+ rdxtree_remove_all(&space->is_reverse_map);
+ assert(space->is_reverse_map.height == 0);
+ assert(space->is_reverse_map.root == NULL);
+}
+
+/* Return ENTRY related to OBJ, or NULL if no such entry is found in
+ the reverse mapping. SPACE must be read-locked or
+ write-locked. */
+static inline ipc_entry_t
+ipc_reverse_lookup(ipc_space_t space,
+ ipc_object_t obj)
+{
+ assert(space != IS_NULL);
+ assert(obj != IO_NULL);
+ return rdxtree_lookup(&space->is_reverse_map, KEY(obj));
+}
+
+#undef KEY
+
#endif /* _IPC_IPC_SPACE_H_ */
diff --git a/ipc/ipc_splay.c b/ipc/ipc_splay.c
deleted file mode 100644
index 6fb5bcbc..00000000
--- a/ipc/ipc_splay.c
+++ /dev/null
@@ -1,920 +0,0 @@
-/*
- * Mach Operating System
- * Copyright (c) 1991,1990,1989 Carnegie Mellon University
- * All Rights Reserved.
- *
- * Permission to use, copy, modify and distribute this software and its
- * documentation is hereby granted, provided that both the copyright
- * notice and this permission notice appear in all copies of the
- * software, derivative works or modified versions, and any portions
- * thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
- * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
- * Carnegie Mellon requests users of this software to return to
- *
- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
- * School of Computer Science
- * Carnegie Mellon University
- * Pittsburgh PA 15213-3890
- *
- * any improvements or extensions that they make and grant Carnegie Mellon
- * the rights to redistribute these changes.
- */
-/*
- */
-/*
- * File: ipc/ipc_splay.c
- * Author: Rich Draves
- * Date: 1989
- *
- * Primitive splay tree operations.
- */
-
-#include <mach/port.h>
-#include <kern/assert.h>
-#include <kern/macro_help.h>
-#include <ipc/ipc_entry.h>
-#include <ipc/ipc_splay.h>
-
-/*
- * Splay trees are self-adjusting binary search trees.
- * They have the following attractive properties:
- * 1) Space efficient; only two pointers per entry.
- * 2) Robust performance; amortized O(log n) per operation.
- * 3) Recursion not needed.
- * This makes them a good fall-back data structure for those
- * entries that don't fit into the lookup table.
- *
- * The paper by Sleator and Tarjan, JACM v. 32, no. 3, pp. 652-686,
- * describes the splaying operation. ipc_splay_prim_lookup
- * and ipc_splay_prim_assemble implement the top-down splay
- * described on p. 669.
- *
- * The tree is stored in an unassembled form. If ist_root is null,
- * then the tree has no entries. Otherwise, ist_name records
- * the value used for the last lookup. ist_root points to the
- * middle tree obtained from the top-down splay. ist_ltree and
- * ist_rtree point to left and right subtrees, whose entries
- * are all smaller (larger) than those in the middle tree.
- * ist_ltreep and ist_rtreep are pointers to fields in the
- * left and right subtrees. ist_ltreep points to the rchild field
- * of the largest entry in ltree, and ist_rtreep points to the
- * lchild field of the smallest entry in rtree. The pointed-to
- * fields aren't initialized. If the left (right) subtree is null,
- * then ist_ltreep (ist_rtreep) points to the ist_ltree (ist_rtree)
- * field in the splay structure itself.
- *
- * The primary advantage of the unassembled form is that repeated
- * unsuccessful lookups are efficient. In particular, an unsuccessful
- * lookup followed by an insert only requires one splaying operation.
- *
- * The traversal algorithm works via pointer inversion.
- * When descending down the tree, child pointers are reversed
- * to point back to the parent entry. When ascending,
- * the pointers are restored to their original value.
- *
- * The biggest potential problem with the splay tree implementation
- * is that the operations, even lookup, require an exclusive lock.
- * If IPC spaces are protected with exclusive locks, then
- * the splay tree doesn't require its own lock, and ist_lock/ist_unlock
- * needn't do anything. If IPC spaces are protected with read/write
- * locks then ist_lock/ist_unlock should provide exclusive access.
- *
- * If it becomes important to let lookups run in parallel,
- * or if the restructuring makes lookups too expensive, then
- * there is hope. Use a read/write lock on the splay tree.
- * Keep track of the number of entries in the tree. When doing
- * a lookup, first try a non-restructuring lookup with a read lock held,
- * with a bound (based on log of size of the tree) on the number of
- * entries to traverse. If the lookup runs up against the bound,
- * then take a write lock and do a reorganizing lookup.
- * This way, if lookups only access roughly balanced parts
- * of the tree, then lookups run in parallel and do no restructuring.
- *
- * The traversal algorithm currently requires an exclusive lock.
- * If that is a problem, the tree could be changed from an lchild/rchild
- * representation to a leftmost child/right sibling representation.
- * In conjunction with non-restructing lookups, this would let
- * lookups and traversals all run in parallel. But this representation
- * is more complicated and would slow down the operations.
- */
-
-/*
- * Boundary values to hand to ipc_splay_prim_lookup:
- */
-
-#define MACH_PORT_SMALLEST ((mach_port_t) 0)
-#define MACH_PORT_LARGEST ((mach_port_t) ~0)
-
-/*
- * Routine: ipc_splay_prim_lookup
- * Purpose:
- * Searches for the node labeled name in the splay tree.
- * Returns three nodes (treep, ltreep, rtreep) and
- * two pointers to nodes (ltreepp, rtreepp).
- *
- * ipc_splay_prim_lookup splits the supplied tree into
- * three subtrees, left, middle, and right, returned
- * in ltreep, treep, and rtreep.
- *
- * If name is present in the tree, then it is at
- * the root of the middle tree. Otherwise, the root
- * of the middle tree is the last node traversed.
- *
- * ipc_splay_prim_lookup returns a pointer into
- * the left subtree, to the rchild field of its
- * largest node, in ltreepp. It returns a pointer
- * into the right subtree, to the lchild field of its
- * smallest node, in rtreepp.
- */
-
-static void
-ipc_splay_prim_lookup(
- mach_port_t name,
- ipc_tree_entry_t tree,
- ipc_tree_entry_t *treep,
- ipc_tree_entry_t *ltreep,
- ipc_tree_entry_t **ltreepp,
- ipc_tree_entry_t *rtreep,
- ipc_tree_entry_t **rtreepp)
-{
- mach_port_t tname; /* temp name */
- ipc_tree_entry_t lchild, rchild; /* temp child pointers */
-
- assert(tree != ITE_NULL);
-
-#define link_left \
-MACRO_BEGIN \
- *ltreep = tree; \
- ltreep = &tree->ite_rchild; \
- tree = *ltreep; \
-MACRO_END
-
-#define link_right \
-MACRO_BEGIN \
- *rtreep = tree; \
- rtreep = &tree->ite_lchild; \
- tree = *rtreep; \
-MACRO_END
-
-#define rotate_left \
-MACRO_BEGIN \
- ipc_tree_entry_t temp = tree; \
- \
- tree = temp->ite_rchild; \
- temp->ite_rchild = tree->ite_lchild; \
- tree->ite_lchild = temp; \
-MACRO_END
-
-#define rotate_right \
-MACRO_BEGIN \
- ipc_tree_entry_t temp = tree; \
- \
- tree = temp->ite_lchild; \
- temp->ite_lchild = tree->ite_rchild; \
- tree->ite_rchild = temp; \
-MACRO_END
-
- while (name != (tname = tree->ite_name)) {
- if (name < tname) {
- /* descend to left */
-
- lchild = tree->ite_lchild;
- if (lchild == ITE_NULL)
- break;
- tname = lchild->ite_name;
-
- if ((name < tname) &&
- (lchild->ite_lchild != ITE_NULL))
- rotate_right;
- link_right;
- if ((name > tname) &&
- (lchild->ite_rchild != ITE_NULL))
- link_left;
- } else {
- /* descend to right */
-
- rchild = tree->ite_rchild;
- if (rchild == ITE_NULL)
- break;
- tname = rchild->ite_name;
-
- if ((name > tname) &&
- (rchild->ite_rchild != ITE_NULL))
- rotate_left;
- link_left;
- if ((name < tname) &&
- (rchild->ite_lchild != ITE_NULL))
- link_right;
- }
-
- assert(tree != ITE_NULL);
- }
-
- *treep = tree;
- *ltreepp = ltreep;
- *rtreepp = rtreep;
-
-#undef link_left
-#undef link_right
-#undef rotate_left
-#undef rotate_right
-}
-
-/*
- * Routine: ipc_splay_prim_assemble
- * Purpose:
- * Assembles the results of ipc_splay_prim_lookup
- * into a splay tree with the found node at the root.
- *
- * ltree and rtree are by-reference so storing
- * through ltreep and rtreep can change them.
- */
-
-static void
-ipc_splay_prim_assemble(
- ipc_tree_entry_t tree,
- ipc_tree_entry_t *ltree,
- ipc_tree_entry_t *ltreep,
- ipc_tree_entry_t *rtree,
- ipc_tree_entry_t *rtreep)
-{
- assert(tree != ITE_NULL);
-
- *ltreep = tree->ite_lchild;
- *rtreep = tree->ite_rchild;
-
- tree->ite_lchild = *ltree;
- tree->ite_rchild = *rtree;
-}
-
-/*
- * Routine: ipc_splay_tree_init
- * Purpose:
- * Initialize a raw splay tree for use.
- */
-
-void
-ipc_splay_tree_init(
- ipc_splay_tree_t splay)
-{
- splay->ist_root = ITE_NULL;
-}
-
-/*
- * Routine: ipc_splay_tree_pick
- * Purpose:
- * Picks and returns a random entry in a splay tree.
- * Returns FALSE if the splay tree is empty.
- */
-
-boolean_t
-ipc_splay_tree_pick(
- ipc_splay_tree_t splay,
- mach_port_t *namep,
- ipc_tree_entry_t *entryp)
-{
- ipc_tree_entry_t root;
-
- ist_lock(splay);
-
- root = splay->ist_root;
- if (root != ITE_NULL) {
- *namep = root->ite_name;
- *entryp = root;
- }
-
- ist_unlock(splay);
-
- return root != ITE_NULL;
-}
-
-/*
- * Routine: ipc_splay_tree_lookup
- * Purpose:
- * Finds an entry in a splay tree.
- * Returns ITE_NULL if not found.
- */
-
-ipc_tree_entry_t
-ipc_splay_tree_lookup(
- ipc_splay_tree_t splay,
- mach_port_t name)
-{
- ipc_tree_entry_t root;
-
- ist_lock(splay);
-
- root = splay->ist_root;
- if (root != ITE_NULL) {
- if (splay->ist_name != name) {
- ipc_splay_prim_assemble(root,
- &splay->ist_ltree, splay->ist_ltreep,
- &splay->ist_rtree, splay->ist_rtreep);
- ipc_splay_prim_lookup(name, root, &root,
- &splay->ist_ltree, &splay->ist_ltreep,
- &splay->ist_rtree, &splay->ist_rtreep);
- splay->ist_name = name;
- splay->ist_root = root;
- }
-
- if (name != root->ite_name)
- root = ITE_NULL;
- }
-
- ist_unlock(splay);
-
- return root;
-}
-
-/*
- * Routine: ipc_splay_tree_insert
- * Purpose:
- * Inserts a new entry into a splay tree.
- * The caller supplies a new entry.
- * The name can't already be present in the tree.
- */
-
-void
-ipc_splay_tree_insert(
- ipc_splay_tree_t splay,
- mach_port_t name,
- ipc_tree_entry_t entry)
-{
- ipc_tree_entry_t root;
-
- assert(entry != ITE_NULL);
-
- ist_lock(splay);
-
- root = splay->ist_root;
- if (root == ITE_NULL) {
- entry->ite_lchild = ITE_NULL;
- entry->ite_rchild = ITE_NULL;
- } else {
- if (splay->ist_name != name) {
- ipc_splay_prim_assemble(root,
- &splay->ist_ltree, splay->ist_ltreep,
- &splay->ist_rtree, splay->ist_rtreep);
- ipc_splay_prim_lookup(name, root, &root,
- &splay->ist_ltree, &splay->ist_ltreep,
- &splay->ist_rtree, &splay->ist_rtreep);
- }
-
- assert(root->ite_name != name);
-
- if (name < root->ite_name) {
- assert(root->ite_lchild == ITE_NULL);
-
- *splay->ist_ltreep = ITE_NULL;
- *splay->ist_rtreep = root;
- } else {
- assert(root->ite_rchild == ITE_NULL);
-
- *splay->ist_ltreep = root;
- *splay->ist_rtreep = ITE_NULL;
- }
-
- entry->ite_lchild = splay->ist_ltree;
- entry->ite_rchild = splay->ist_rtree;
- }
-
- entry->ite_name = name;
- splay->ist_root = entry;
- splay->ist_name = name;
- splay->ist_ltreep = &splay->ist_ltree;
- splay->ist_rtreep = &splay->ist_rtree;
-
- ist_unlock(splay);
-}
-
-/*
- * Routine: ipc_splay_tree_delete
- * Purpose:
- * Deletes an entry from a splay tree.
- * The name must be present in the tree.
- * Frees the entry.
- *
- * The "entry" argument isn't currently used.
- * Other implementations might want it, though.
- */
-
-void
-ipc_splay_tree_delete(
- ipc_splay_tree_t splay,
- mach_port_t name,
- ipc_tree_entry_t entry)
-{
- ipc_tree_entry_t root, saved;
-
- ist_lock(splay);
-
- root = splay->ist_root;
- assert(root != ITE_NULL);
-
- if (splay->ist_name != name) {
- ipc_splay_prim_assemble(root,
- &splay->ist_ltree, splay->ist_ltreep,
- &splay->ist_rtree, splay->ist_rtreep);
- ipc_splay_prim_lookup(name, root, &root,
- &splay->ist_ltree, &splay->ist_ltreep,
- &splay->ist_rtree, &splay->ist_rtreep);
- }
-
- assert(root->ite_name == name);
- assert(root == entry);
-
- *splay->ist_ltreep = root->ite_lchild;
- *splay->ist_rtreep = root->ite_rchild;
- ite_free(root);
-
- root = splay->ist_ltree;
- saved = splay->ist_rtree;
-
- if (root == ITE_NULL)
- root = saved;
- else if (saved != ITE_NULL) {
- /*
- * Find the largest node in the left subtree, and splay it
- * to the root. Then add the saved right subtree.
- */
-
- ipc_splay_prim_lookup(MACH_PORT_LARGEST, root, &root,
- &splay->ist_ltree, &splay->ist_ltreep,
- &splay->ist_rtree, &splay->ist_rtreep);
- ipc_splay_prim_assemble(root,
- &splay->ist_ltree, splay->ist_ltreep,
- &splay->ist_rtree, splay->ist_rtreep);
-
- assert(root->ite_rchild == ITE_NULL);
- root->ite_rchild = saved;
- }
-
- splay->ist_root = root;
- if (root != ITE_NULL) {
- splay->ist_name = root->ite_name;
- splay->ist_ltreep = &splay->ist_ltree;
- splay->ist_rtreep = &splay->ist_rtree;
- }
-
- ist_unlock(splay);
-}
-
-/*
- * Routine: ipc_splay_tree_split
- * Purpose:
- * Split a splay tree. Puts all entries smaller than "name"
- * into a new tree, "small".
- *
- * Doesn't do locking on "small", because nobody else
- * should be fiddling with the uninitialized tree.
- */
-
-void
-ipc_splay_tree_split(
- ipc_splay_tree_t splay,
- mach_port_t name,
- ipc_splay_tree_t small)
-{
- ipc_tree_entry_t root;
-
- ipc_splay_tree_init(small);
-
- ist_lock(splay);
-
- root = splay->ist_root;
- if (root != ITE_NULL) {
- /* lookup name, to get it (or last traversed) to the top */
-
- if (splay->ist_name != name) {
- ipc_splay_prim_assemble(root,
- &splay->ist_ltree, splay->ist_ltreep,
- &splay->ist_rtree, splay->ist_rtreep);
- ipc_splay_prim_lookup(name, root, &root,
- &splay->ist_ltree, &splay->ist_ltreep,
- &splay->ist_rtree, &splay->ist_rtreep);
- }
-
- if (root->ite_name < name) {
- /* root goes into small */
-
- *splay->ist_ltreep = root->ite_lchild;
- *splay->ist_rtreep = ITE_NULL;
- root->ite_lchild = splay->ist_ltree;
- assert(root->ite_rchild == ITE_NULL);
-
- small->ist_root = root;
- small->ist_name = root->ite_name;
- small->ist_ltreep = &small->ist_ltree;
- small->ist_rtreep = &small->ist_rtree;
-
- /* rtree goes into splay */
-
- root = splay->ist_rtree;
- splay->ist_root = root;
- if (root != ITE_NULL) {
- splay->ist_name = root->ite_name;
- splay->ist_ltreep = &splay->ist_ltree;
- splay->ist_rtreep = &splay->ist_rtree;
- }
- } else {
- /* root stays in splay */
-
- *splay->ist_ltreep = root->ite_lchild;
- root->ite_lchild = ITE_NULL;
-
- splay->ist_root = root;
- splay->ist_name = name;
- splay->ist_ltreep = &splay->ist_ltree;
-
- /* ltree goes into small */
-
- root = splay->ist_ltree;
- small->ist_root = root;
- if (root != ITE_NULL) {
- small->ist_name = root->ite_name;
- small->ist_ltreep = &small->ist_ltree;
- small->ist_rtreep = &small->ist_rtree;
- }
- }
- }
-
- ist_unlock(splay);
-}
-
-/*
- * Routine: ipc_splay_tree_join
- * Purpose:
- * Joins two splay trees. Merges the entries in "small",
- * which must all be smaller than the entries in "splay",
- * into "splay".
- */
-
-void
-ipc_splay_tree_join(
- ipc_splay_tree_t splay,
- ipc_splay_tree_t small)
-{
- ipc_tree_entry_t sroot;
-
- /* pull entries out of small */
-
- ist_lock(small);
-
- sroot = small->ist_root;
- if (sroot != ITE_NULL) {
- ipc_splay_prim_assemble(sroot,
- &small->ist_ltree, small->ist_ltreep,
- &small->ist_rtree, small->ist_rtreep);
- small->ist_root = ITE_NULL;
- }
-
- ist_unlock(small);
-
- /* put entries, if any, into splay */
-
- if (sroot != ITE_NULL) {
- ipc_tree_entry_t root;
-
- ist_lock(splay);
-
- root = splay->ist_root;
- if (root == ITE_NULL) {
- root = sroot;
- } else {
- /* get smallest entry in splay tree to top */
-
- if (splay->ist_name != MACH_PORT_SMALLEST) {
- ipc_splay_prim_assemble(root,
- &splay->ist_ltree, splay->ist_ltreep,
- &splay->ist_rtree, splay->ist_rtreep);
- ipc_splay_prim_lookup(MACH_PORT_SMALLEST,
- root, &root,
- &splay->ist_ltree, &splay->ist_ltreep,
- &splay->ist_rtree, &splay->ist_rtreep);
- }
-
- ipc_splay_prim_assemble(root,
- &splay->ist_ltree, splay->ist_ltreep,
- &splay->ist_rtree, splay->ist_rtreep);
-
- assert(root->ite_lchild == ITE_NULL);
- assert(sroot->ite_name < root->ite_name);
- root->ite_lchild = sroot;
- }
-
- splay->ist_root = root;
- splay->ist_name = root->ite_name;
- splay->ist_ltreep = &splay->ist_ltree;
- splay->ist_rtreep = &splay->ist_rtree;
-
- ist_unlock(splay);
- }
-}
-
-/*
- * Routine: ipc_splay_tree_bounds
- * Purpose:
- * Given a name, returns the largest value present
- * in the tree that is smaller than or equal to the name,
- * or ~0 if no such value exists. Similarly, returns
- * the smallest value present that is greater than or
- * equal to the name, or 0 if no such value exists.
- *
- * Hence, if
- * lower = upper, then lower = name = upper
- * and name is present in the tree
- * lower = ~0 and upper = 0,
- * then the tree is empty
- * lower = ~0 and upper > 0, then name < upper
- * and upper is smallest value in tree
- * lower < ~0 and upper = 0, then lower < name
- * and lower is largest value in tree
- * lower < ~0 and upper > 0, then lower < name < upper
- * and they are tight bounds on name
- *
- * (Note MACH_PORT_SMALLEST = 0 and MACH_PORT_LARGEST = ~0.)
- */
-
-void
-ipc_splay_tree_bounds(
- ipc_splay_tree_t splay,
- mach_port_t name,
- mach_port_t *lowerp,
- mach_port_t *upperp)
-{
- ipc_tree_entry_t root;
-
- ist_lock(splay);
-
- root = splay->ist_root;
- if (root == ITE_NULL) {
- *lowerp = MACH_PORT_LARGEST;
- *upperp = MACH_PORT_SMALLEST;
- } else {
- mach_port_t rname;
-
- if (splay->ist_name != name) {
- ipc_splay_prim_assemble(root,
- &splay->ist_ltree, splay->ist_ltreep,
- &splay->ist_rtree, splay->ist_rtreep);
- ipc_splay_prim_lookup(name, root, &root,
- &splay->ist_ltree, &splay->ist_ltreep,
- &splay->ist_rtree, &splay->ist_rtreep);
- splay->ist_name = name;
- splay->ist_root = root;
- }
-
- rname = root->ite_name;
-
- /*
- * OK, it's a hack. We convert the ltreep and rtreep
- * pointers back into real entry pointers,
- * so we can pick the names out of the entries.
- */
-
- if (rname <= name)
- *lowerp = rname;
- else if (splay->ist_ltreep == &splay->ist_ltree)
- *lowerp = MACH_PORT_LARGEST;
- else {
- ipc_tree_entry_t entry;
-
- entry = (ipc_tree_entry_t)
- ((char *)splay->ist_ltreep -
- ((char *)&root->ite_rchild -
- (char *)root));
- *lowerp = entry->ite_name;
- }
-
- if (rname >= name)
- *upperp = rname;
- else if (splay->ist_rtreep == &splay->ist_rtree)
- *upperp = MACH_PORT_SMALLEST;
- else {
- ipc_tree_entry_t entry;
-
- entry = (ipc_tree_entry_t)
- ((char *)splay->ist_rtreep -
- ((char *)&root->ite_lchild -
- (char *)root));
- *upperp = entry->ite_name;
- }
- }
-
- ist_unlock(splay);
-}
-
-/*
- * Routine: ipc_splay_traverse_start
- * Routine: ipc_splay_traverse_next
- * Routine: ipc_splay_traverse_finish
- * Purpose:
- * Perform a symmetric order traversal of a splay tree.
- * Usage:
- * for (entry = ipc_splay_traverse_start(splay);
- * entry != ITE_NULL;
- * entry = ipc_splay_traverse_next(splay, delete)) {
- * do something with entry
- * }
- * ipc_splay_traverse_finish(splay);
- *
- * If "delete" is TRUE, then the current entry
- * is removed from the tree and deallocated.
- *
- * During the traversal, the splay tree is locked.
- */
-
-ipc_tree_entry_t
-ipc_splay_traverse_start(
- ipc_splay_tree_t splay)
-{
- ipc_tree_entry_t current, parent;
-
- ist_lock(splay);
-
- current = splay->ist_root;
- if (current != ITE_NULL) {
- ipc_splay_prim_assemble(current,
- &splay->ist_ltree, splay->ist_ltreep,
- &splay->ist_rtree, splay->ist_rtreep);
-
- parent = ITE_NULL;
-
- while (current->ite_lchild != ITE_NULL) {
- ipc_tree_entry_t next;
-
- next = current->ite_lchild;
- current->ite_lchild = parent;
- parent = current;
- current = next;
- }
-
- splay->ist_ltree = current;
- splay->ist_rtree = parent;
- }
-
- return current;
-}
-
-ipc_tree_entry_t
-ipc_splay_traverse_next(
- ipc_splay_tree_t splay,
- boolean_t delete)
-{
- ipc_tree_entry_t current, parent;
-
- /* pick up where traverse_entry left off */
-
- current = splay->ist_ltree;
- parent = splay->ist_rtree;
- assert(current != ITE_NULL);
-
- if (!delete)
- goto traverse_right;
-
- /* we must delete current and patch the tree */
-
- if (current->ite_lchild == ITE_NULL) {
- if (current->ite_rchild == ITE_NULL) {
- /* like traverse_back, but with deletion */
-
- if (parent == ITE_NULL) {
- ite_free(current);
-
- splay->ist_root = ITE_NULL;
- return ITE_NULL;
- }
-
- if (current->ite_name < parent->ite_name) {
- ite_free(current);
-
- current = parent;
- parent = current->ite_lchild;
- current->ite_lchild = ITE_NULL;
- goto traverse_entry;
- } else {
- ite_free(current);
-
- current = parent;
- parent = current->ite_rchild;
- current->ite_rchild = ITE_NULL;
- goto traverse_back;
- }
- } else {
- ipc_tree_entry_t prev;
-
- prev = current;
- current = current->ite_rchild;
- ite_free(prev);
- goto traverse_left;
- }
- } else {
- if (current->ite_rchild == ITE_NULL) {
- ipc_tree_entry_t prev;
-
- prev = current;
- current = current->ite_lchild;
- ite_free(prev);
- goto traverse_back;
- } else {
- ipc_tree_entry_t prev;
- ipc_tree_entry_t ltree, rtree;
- ipc_tree_entry_t *ltreep, *rtreep;
-
- /* replace current with largest of left children */
-
- prev = current;
- ipc_splay_prim_lookup(MACH_PORT_LARGEST,
- current->ite_lchild, &current,
- &ltree, &ltreep, &rtree, &rtreep);
- ipc_splay_prim_assemble(current,
- &ltree, ltreep, &rtree, rtreep);
-
- assert(current->ite_rchild == ITE_NULL);
- current->ite_rchild = prev->ite_rchild;
- ite_free(prev);
- goto traverse_right;
- }
- }
- /*NOTREACHED*/
-
- /*
- * A state machine: for each entry, we
- * 1) traverse left subtree
- * 2) traverse the entry
- * 3) traverse right subtree
- * 4) traverse back to parent
- */
-
- traverse_left:
- if (current->ite_lchild != ITE_NULL) {
- ipc_tree_entry_t next;
-
- next = current->ite_lchild;
- current->ite_lchild = parent;
- parent = current;
- current = next;
- goto traverse_left;
- }
-
- traverse_entry:
- splay->ist_ltree = current;
- splay->ist_rtree = parent;
- return current;
-
- traverse_right:
- if (current->ite_rchild != ITE_NULL) {
- ipc_tree_entry_t next;
-
- next = current->ite_rchild;
- current->ite_rchild = parent;
- parent = current;
- current = next;
- goto traverse_left;
- }
-
- traverse_back:
- if (parent == ITE_NULL) {
- splay->ist_root = current;
- return ITE_NULL;
- }
-
- if (current->ite_name < parent->ite_name) {
- ipc_tree_entry_t prev;
-
- prev = current;
- current = parent;
- parent = current->ite_lchild;
- current->ite_lchild = prev;
- goto traverse_entry;
- } else {
- ipc_tree_entry_t prev;
-
- prev = current;
- current = parent;
- parent = current->ite_rchild;
- current->ite_rchild = prev;
- goto traverse_back;
- }
-}
-
-void
-ipc_splay_traverse_finish(
- ipc_splay_tree_t splay)
-{
- ipc_tree_entry_t root;
-
- root = splay->ist_root;
- if (root != ITE_NULL) {
- splay->ist_name = root->ite_name;
- splay->ist_ltreep = &splay->ist_ltree;
- splay->ist_rtreep = &splay->ist_rtree;
- }
-
- ist_unlock(splay);
-}
-
diff --git a/ipc/ipc_splay.h b/ipc/ipc_splay.h
deleted file mode 100644
index d3316ef8..00000000
--- a/ipc/ipc_splay.h
+++ /dev/null
@@ -1,114 +0,0 @@
-/*
- * Mach Operating System
- * Copyright (c) 1991,1990,1989 Carnegie Mellon University
- * All Rights Reserved.
- *
- * Permission to use, copy, modify and distribute this software and its
- * documentation is hereby granted, provided that both the copyright
- * notice and this permission notice appear in all copies of the
- * software, derivative works or modified versions, and any portions
- * thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
- * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
- * Carnegie Mellon requests users of this software to return to
- *
- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
- * School of Computer Science
- * Carnegie Mellon University
- * Pittsburgh PA 15213-3890
- *
- * any improvements or extensions that they make and grant Carnegie Mellon
- * the rights to redistribute these changes.
- */
-/*
- */
-/*
- * File: ipc/ipc_splay.h
- * Author: Rich Draves
- * Date: 1989
- *
- * Declarations of primitive splay tree operations.
- */
-
-#ifndef _IPC_IPC_SPLAY_H_
-#define _IPC_IPC_SPLAY_H_
-
-#include <mach/port.h>
-#include <kern/assert.h>
-#include <kern/macro_help.h>
-#include <ipc/ipc_entry.h>
-
-typedef struct ipc_splay_tree {
- mach_port_t ist_name; /* name used in last lookup */
- ipc_tree_entry_t ist_root; /* root of middle tree */
- ipc_tree_entry_t ist_ltree; /* root of left tree */
- ipc_tree_entry_t *ist_ltreep; /* pointer into left tree */
- ipc_tree_entry_t ist_rtree; /* root of right tree */
- ipc_tree_entry_t *ist_rtreep; /* pointer into right tree */
-} *ipc_splay_tree_t;
-
-#define ist_lock(splay) /* no locking */
-#define ist_unlock(splay) /* no locking */
-
-/* Initialize a raw splay tree */
-extern void ipc_splay_tree_init(
- ipc_splay_tree_t splay);
-
-/* Pick a random entry in a splay tree */
-extern boolean_t ipc_splay_tree_pick(
- ipc_splay_tree_t splay,
- mach_port_t *namep,
- ipc_tree_entry_t *entryp);
-
-/* Find an entry in a splay tree */
-extern ipc_tree_entry_t ipc_splay_tree_lookup(
- ipc_splay_tree_t splay,
- mach_port_t name);
-
-/* Insert a new entry into a splay tree */
-extern void ipc_splay_tree_insert(
- ipc_splay_tree_t splay,
- mach_port_t name,
- ipc_tree_entry_t entry);
-
-/* Delete an entry from a splay tree */
-extern void ipc_splay_tree_delete(
- ipc_splay_tree_t splay,
- mach_port_t name,
- ipc_tree_entry_t entry);
-
-/* Split a splay tree */
-extern void ipc_splay_tree_split(
- ipc_splay_tree_t splay,
- mach_port_t name,
- ipc_splay_tree_t entry);
-
-/* Join two splay trees */
-extern void ipc_splay_tree_join(
- ipc_splay_tree_t splay,
- ipc_splay_tree_t small);
-
-/* Do a bounded splay tree lookup */
-extern void ipc_splay_tree_bounds(
- ipc_splay_tree_t splay,
- mach_port_t name,
- mach_port_t *lowerp,
- mach_port_t *upperp);
-
-/* Initialize a symmetric order traversal of a splay tree */
-extern ipc_tree_entry_t ipc_splay_traverse_start(
- ipc_splay_tree_t splay);
-
-/* Return the next entry in a symmetric order traversal of a splay tree */
-extern ipc_tree_entry_t ipc_splay_traverse_next(
- ipc_splay_tree_t splay,
- boolean_t delete);
-
-/* Terminate a symmetric order traversal of a splay tree */
-extern void ipc_splay_traverse_finish(
- ipc_splay_tree_t splay);
-
-#endif /* _IPC_IPC_SPLAY_H_ */
diff --git a/ipc/ipc_table.c b/ipc/ipc_table.c
index cbb6a894..0f8592a3 100644
--- a/ipc/ipc_table.c
+++ b/ipc/ipc_table.c
@@ -42,20 +42,8 @@
#include <kern/slab.h>
#include <vm/vm_kern.h>
-/*
- * Forward declarations
- */
-void ipc_table_fill(
- ipc_table_size_t its,
- unsigned int num,
- unsigned int min,
- vm_size_t elemsize);
-
-ipc_table_size_t ipc_table_entries;
-unsigned int ipc_table_entries_size = 512;
-
ipc_table_size_t ipc_table_dnrequests;
-unsigned int ipc_table_dnrequests_size = 64;
+const unsigned int ipc_table_dnrequests_size = 64;
void
ipc_table_fill(
@@ -101,20 +89,6 @@ ipc_table_fill(
void
ipc_table_init(void)
{
- ipc_table_entries = (ipc_table_size_t)
- kalloc(sizeof(struct ipc_table_size) *
- ipc_table_entries_size);
- assert(ipc_table_entries != ITS_NULL);
-
- ipc_table_fill(ipc_table_entries, ipc_table_entries_size - 1,
- 4, sizeof(struct ipc_entry));
-
- /* the last two elements should have the same size */
-
- ipc_table_entries[ipc_table_entries_size - 1].its_size =
- ipc_table_entries[ipc_table_entries_size - 2].its_size;
-
-
ipc_table_dnrequests = (ipc_table_size_t)
kalloc(sizeof(struct ipc_table_size) *
ipc_table_dnrequests_size);
@@ -140,42 +114,7 @@ vm_offset_t
ipc_table_alloc(
vm_size_t size)
{
- vm_offset_t table;
-
- if (size < PAGE_SIZE)
- table = kalloc(size);
- else
- if (kmem_alloc(kmem_map, &table, size) != KERN_SUCCESS)
- table = 0;
-
- return table;
-}
-
-/*
- * Routine: ipc_table_realloc
- * Purpose:
- * Reallocate a big table.
- *
- * The new table remaps the old table,
- * so copying is not necessary.
- * Conditions:
- * Only works for page-size or bigger tables.
- * May block.
- */
-
-vm_offset_t
-ipc_table_realloc(
- vm_size_t old_size,
- vm_offset_t old_table,
- vm_size_t new_size)
-{
- vm_offset_t new_table;
-
- if (kmem_realloc(kmem_map, old_table, old_size,
- &new_table, new_size) != KERN_SUCCESS)
- new_table = 0;
-
- return new_table;
+ return kalloc(size);
}
/*
@@ -192,8 +131,5 @@ ipc_table_free(
vm_size_t size,
vm_offset_t table)
{
- if (size < PAGE_SIZE)
- kfree(table, size);
- else
- kmem_free(kmem_map, table, size);
+ kfree(table, size);
}
diff --git a/ipc/ipc_table.h b/ipc/ipc_table.h
index 695adae4..7968e6bb 100644
--- a/ipc/ipc_table.h
+++ b/ipc/ipc_table.h
@@ -30,8 +30,8 @@
* Author: Rich Draves
* Date: 1989
*
- * Definitions for tables, used for IPC capabilities (ipc_entry_t)
- * and dead-name requests (ipc_port_request_t).
+ * Definitions for tables, used for dead-name requests
+ * (ipc_port_request_t).
*/
#ifndef _IPC_IPC_TABLE_H_
@@ -41,20 +41,7 @@
#include <mach/vm_param.h>
/*
- * The is_table_next field of an ipc_space_t points to
- * an ipc_table_size structure. These structures must
- * be elements of an array, ipc_table_entries.
- *
- * The array must end with two elements with the same its_size value.
- * Except for the terminating element, the its_size values must
- * be strictly increasing. The largest (last) its_size value
- * must be less than or equal to MACH_PORT_INDEX(MACH_PORT_DEAD).
- * This ensures that
- * 1) MACH_PORT_INDEX(MACH_PORT_DEAD) isn't a valid index
- * in the table, so ipc_entry_get won't allocate it.
- * 2) MACH_PORT_MAKE(index+1, 0) and MAKE_PORT_MAKE(size, 0)
- * won't ever overflow.
- *
+ * Every its_size value must be a power of two.
*
* The ipr_size field of the first element in a table of
* dead-name requests (ipc_port_request_t) points to the
@@ -63,8 +50,6 @@
* with an element with zero its_size, and except for this last
* element, the its_size values must be strictly increasing.
*
- * The is_table_next field points to the ipc_table_size structure
- * for the next larger size of table, not the one currently in use.
* The ipr_size field points to the currently used ipc_table_size.
*/
@@ -77,53 +62,31 @@ typedef struct ipc_table_size {
#define ITS_NULL ((ipc_table_size_t) 0)
-extern ipc_table_size_t ipc_table_entries;
extern ipc_table_size_t ipc_table_dnrequests;
extern void
ipc_table_init(void);
/*
- * Note that ipc_table_alloc, ipc_table_realloc, and ipc_table_free
- * all potentially use the VM system. Hence simple locks can't
- * be held across them.
- *
- * We can't use a copying realloc, because the realloc happens
- * with the data unlocked. ipc_table_realloc remaps the data,
- * so it is OK.
+ * Note that ipc_table_alloc and ipc_table_free both potentially
+ * use the VM system. Hence simple locks can't be held across
+ * them.
*/
/* Allocate a table */
extern vm_offset_t ipc_table_alloc(
vm_size_t size);
-/* Reallocate a big table */
-extern vm_offset_t ipc_table_realloc(
- vm_size_t old_size,
- vm_offset_t old_table,
- vm_size_t new_size);
-
/* Free a table */
extern void ipc_table_free(
vm_size_t size,
vm_offset_t table);
-#define it_entries_alloc(its) \
- ((ipc_entry_t) \
- ipc_table_alloc((its)->its_size * sizeof(struct ipc_entry)))
-
-#define it_entries_reallocable(its) \
- (((its)->its_size * sizeof(struct ipc_entry)) >= PAGE_SIZE)
-
-#define it_entries_realloc(its, table, nits) \
- ((ipc_entry_t) \
- ipc_table_realloc((its)->its_size * sizeof(struct ipc_entry), \
- (vm_offset_t)(table), \
- (nits)->its_size * sizeof(struct ipc_entry)))
-
-#define it_entries_free(its, table) \
- ipc_table_free((its)->its_size * sizeof(struct ipc_entry), \
- (vm_offset_t)(table))
+void ipc_table_fill(
+ ipc_table_size_t its,
+ unsigned int num,
+ unsigned int min,
+ vm_size_t elemsize);
#define it_dnrequests_alloc(its) \
((ipc_port_request_t) \
diff --git a/ipc/ipc_thread.h b/ipc/ipc_thread.h
index fbeea46a..008ab4a9 100644
--- a/ipc/ipc_thread.h
+++ b/ipc/ipc_thread.h
@@ -75,7 +75,7 @@ MACRO_END
#define ipc_thread_rmqueue_first_macro(queue, thread) \
MACRO_BEGIN \
- register ipc_thread_t _next; \
+ ipc_thread_t _next; \
\
assert((queue)->ithq_base == (thread)); \
\
@@ -84,7 +84,7 @@ MACRO_BEGIN \
assert((thread)->ith_prev == (thread)); \
(queue)->ithq_base = ITH_NULL; \
} else { \
- register ipc_thread_t _prev = (thread)->ith_prev; \
+ ipc_thread_t _prev = (thread)->ith_prev; \
\
(queue)->ithq_base = _next; \
_next->ith_prev = _prev; \
@@ -95,14 +95,14 @@ MACRO_END
#define ipc_thread_enqueue_macro(queue, thread) \
MACRO_BEGIN \
- register ipc_thread_t _first = (queue)->ithq_base; \
+ ipc_thread_t _first = (queue)->ithq_base; \
\
if (_first == ITH_NULL) { \
(queue)->ithq_base = (thread); \
assert((thread)->ith_next == (thread)); \
assert((thread)->ith_prev == (thread)); \
} else { \
- register ipc_thread_t _last = _first->ith_prev; \
+ ipc_thread_t _last = _first->ith_prev; \
\
(thread)->ith_next = _first; \
(thread)->ith_prev = _last; \
diff --git a/ipc/mach_debug.c b/ipc/mach_debug.c
index 28dd6935..efb07a4f 100644
--- a/ipc/mach_debug.c
+++ b/ipc/mach_debug.c
@@ -46,7 +46,6 @@
#include <vm/vm_kern.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_port.h>
-#include <ipc/ipc_hash.h>
#include <ipc/ipc_marequest.h>
#include <ipc/ipc_table.h>
#include <ipc/ipc_right.h>
@@ -94,85 +93,6 @@ mach_port_get_srights(
}
/*
- * Routine: host_ipc_hash_info
- * Purpose:
- * Return information about the global reverse hash table.
- * Conditions:
- * Nothing locked. Obeys CountInOut protocol.
- * Returns:
- * KERN_SUCCESS Returned information.
- * KERN_INVALID_HOST The host is null.
- * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
- */
-
-kern_return_t
-host_ipc_hash_info(
- host_t host,
- hash_info_bucket_array_t *infop,
- mach_msg_type_number_t *countp)
-{
- vm_offset_t addr;
- vm_size_t size = 0; /* Suppress gcc warning */
- hash_info_bucket_t *info;
- unsigned int potential, actual;
- kern_return_t kr;
-
- if (host == HOST_NULL)
- return KERN_INVALID_HOST;
-
- /* start with in-line data */
-
- info = *infop;
- potential = *countp;
-
- for (;;) {
- actual = ipc_hash_info(info, potential);
- if (actual <= potential)
- break;
-
- /* allocate more memory */
-
- if (info != *infop)
- kmem_free(ipc_kernel_map, addr, size);
-
- size = round_page(actual * sizeof *info);
- kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
- if (kr != KERN_SUCCESS)
- return KERN_RESOURCE_SHORTAGE;
-
- info = (hash_info_bucket_t *) addr;
- potential = size/sizeof *info;
- }
-
- if (info == *infop) {
- /* data fit in-line; nothing to deallocate */
-
- *countp = actual;
- } else if (actual == 0) {
- kmem_free(ipc_kernel_map, addr, size);
-
- *countp = 0;
- } else {
- vm_map_copy_t copy;
- vm_size_t used;
-
- used = round_page(actual * sizeof *info);
-
- if (used != size)
- kmem_free(ipc_kernel_map, addr + used, size - used);
-
- kr = vm_map_copyin(ipc_kernel_map, addr, used,
- TRUE, &copy);
- assert(kr == KERN_SUCCESS);
-
- *infop = (hash_info_bucket_t *) copy;
- *countp = actual;
- }
-
- return KERN_SUCCESS;
-}
-
-/*
* Routine: host_ipc_marequest_info
* Purpose:
* Return information about the marequest hash table.
@@ -185,11 +105,11 @@ host_ipc_hash_info(
*/
kern_return_t
-host_ipc_marequest_info(host, maxp, infop, countp)
- host_t host;
- unsigned int *maxp;
- hash_info_bucket_array_t *infop;
- unsigned int *countp;
+host_ipc_marequest_info(
+ host_t host,
+ unsigned int *maxp,
+ hash_info_bucket_array_t *infop,
+ unsigned int *countp)
{
vm_offset_t addr;
vm_size_t size = 0; /* '=0' to shut up lint */
@@ -253,251 +173,6 @@ host_ipc_marequest_info(host, maxp, infop, countp)
}
/*
- * Routine: mach_port_space_info
- * Purpose:
- * Returns information about an IPC space.
- * Conditions:
- * Nothing locked. Obeys CountInOut protocol.
- * Returns:
- * KERN_SUCCESS Returned information.
- * KERN_INVALID_TASK The space is null.
- * KERN_INVALID_TASK The space is dead.
- * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
- */
-
-kern_return_t
-mach_port_space_info(
- ipc_space_t space,
- ipc_info_space_t *infop,
- ipc_info_name_array_t *tablep,
- mach_msg_type_number_t *tableCntp,
- ipc_info_tree_name_array_t *treep,
- mach_msg_type_number_t *treeCntp)
-{
- ipc_info_name_t *table_info;
- unsigned int table_potential, table_actual;
- vm_offset_t table_addr;
- vm_size_t table_size = 0; /* Suppress gcc warning */
- ipc_info_tree_name_t *tree_info;
- unsigned int tree_potential, tree_actual;
- vm_offset_t tree_addr;
- vm_size_t tree_size = 0; /* Suppress gcc warning */
- ipc_tree_entry_t tentry;
- ipc_entry_t table;
- ipc_entry_num_t tsize;
- mach_port_index_t index;
- kern_return_t kr;
-
- if (space == IS_NULL)
- return KERN_INVALID_TASK;
-
- /* start with in-line memory */
-
- table_info = *tablep;
- table_potential = *tableCntp;
- tree_info = *treep;
- tree_potential = *treeCntp;
-
- for (;;) {
- is_read_lock(space);
- if (!space->is_active) {
- is_read_unlock(space);
- if (table_info != *tablep)
- kmem_free(ipc_kernel_map,
- table_addr, table_size);
- if (tree_info != *treep)
- kmem_free(ipc_kernel_map,
- tree_addr, tree_size);
- return KERN_INVALID_TASK;
- }
-
- table_actual = space->is_table_size;
- tree_actual = space->is_tree_total;
-
- if ((table_actual <= table_potential) &&
- (tree_actual <= tree_potential))
- break;
-
- is_read_unlock(space);
-
- if (table_actual > table_potential) {
- if (table_info != *tablep)
- kmem_free(ipc_kernel_map,
- table_addr, table_size);
-
- table_size = round_page(table_actual *
- sizeof *table_info);
- kr = kmem_alloc(ipc_kernel_map,
- &table_addr, table_size);
- if (kr != KERN_SUCCESS) {
- if (tree_info != *treep)
- kmem_free(ipc_kernel_map,
- tree_addr, tree_size);
-
- return KERN_RESOURCE_SHORTAGE;
- }
-
- table_info = (ipc_info_name_t *) table_addr;
- table_potential = table_size/sizeof *table_info;
- }
-
- if (tree_actual > tree_potential) {
- if (tree_info != *treep)
- kmem_free(ipc_kernel_map,
- tree_addr, tree_size);
-
- tree_size = round_page(tree_actual *
- sizeof *tree_info);
- kr = kmem_alloc(ipc_kernel_map,
- &tree_addr, tree_size);
- if (kr != KERN_SUCCESS) {
- if (table_info != *tablep)
- kmem_free(ipc_kernel_map,
- table_addr, table_size);
-
- return KERN_RESOURCE_SHORTAGE;
- }
-
- tree_info = (ipc_info_tree_name_t *) tree_addr;
- tree_potential = tree_size/sizeof *tree_info;
- }
- }
- /* space is read-locked and active; we have enough wired memory */
-
- infop->iis_genno_mask = MACH_PORT_NGEN(MACH_PORT_DEAD);
- infop->iis_table_size = space->is_table_size;
- infop->iis_table_next = space->is_table_next->its_size;
- infop->iis_tree_size = space->is_tree_total;
- infop->iis_tree_small = space->is_tree_small;
- infop->iis_tree_hash = space->is_tree_hash;
-
- table = space->is_table;
- tsize = space->is_table_size;
-
- for (index = 0; index < tsize; index++) {
- ipc_info_name_t *iin = &table_info[index];
- ipc_entry_t entry = &table[index];
- ipc_entry_bits_t bits = entry->ie_bits;
-
- iin->iin_name = MACH_PORT_MAKEB(index, bits);
- iin->iin_collision = (bits & IE_BITS_COLLISION) ? TRUE : FALSE;
- iin->iin_compat = FALSE;
- iin->iin_marequest = (bits & IE_BITS_MAREQUEST) ? TRUE : FALSE;
- iin->iin_type = IE_BITS_TYPE(bits);
- iin->iin_urefs = IE_BITS_UREFS(bits);
- iin->iin_object = (vm_offset_t) entry->ie_object;
- iin->iin_next = entry->ie_next;
- iin->iin_hash = entry->ie_index;
- }
-
- for (tentry = ipc_splay_traverse_start(&space->is_tree), index = 0;
- tentry != ITE_NULL;
- tentry = ipc_splay_traverse_next(&space->is_tree, FALSE)) {
- ipc_info_tree_name_t *iitn = &tree_info[index++];
- ipc_info_name_t *iin = &iitn->iitn_name;
- ipc_entry_t entry = &tentry->ite_entry;
- ipc_entry_bits_t bits = entry->ie_bits;
-
- assert(IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE);
-
- iin->iin_name = tentry->ite_name;
- iin->iin_collision = (bits & IE_BITS_COLLISION) ? TRUE : FALSE;
- iin->iin_compat = FALSE;
- iin->iin_marequest = (bits & IE_BITS_MAREQUEST) ? TRUE : FALSE;
- iin->iin_type = IE_BITS_TYPE(bits);
- iin->iin_urefs = IE_BITS_UREFS(bits);
- iin->iin_object = (vm_offset_t) entry->ie_object;
- iin->iin_next = entry->ie_next;
- iin->iin_hash = entry->ie_index;
-
- if (tentry->ite_lchild == ITE_NULL)
- iitn->iitn_lchild = MACH_PORT_NULL;
- else
- iitn->iitn_lchild = tentry->ite_lchild->ite_name;
-
- if (tentry->ite_rchild == ITE_NULL)
- iitn->iitn_rchild = MACH_PORT_NULL;
- else
- iitn->iitn_rchild = tentry->ite_rchild->ite_name;
-
- }
- ipc_splay_traverse_finish(&space->is_tree);
- is_read_unlock(space);
-
- if (table_info == *tablep) {
- /* data fit in-line; nothing to deallocate */
-
- *tableCntp = table_actual;
- } else if (table_actual == 0) {
- kmem_free(ipc_kernel_map, table_addr, table_size);
-
- *tableCntp = 0;
- } else {
- vm_size_t size_used, rsize_used;
- vm_map_copy_t copy;
-
- /* kmem_alloc doesn't zero memory */
-
- size_used = table_actual * sizeof *table_info;
- rsize_used = round_page(size_used);
-
- if (rsize_used != table_size)
- kmem_free(ipc_kernel_map,
- table_addr + rsize_used,
- table_size - rsize_used);
-
- if (size_used != rsize_used)
- memset((char *) (table_addr + size_used), 0,
- rsize_used - size_used);
-
- kr = vm_map_copyin(ipc_kernel_map, table_addr, rsize_used,
- TRUE, &copy);
-
- assert(kr == KERN_SUCCESS);
-
- *tablep = (ipc_info_name_t *) copy;
- *tableCntp = table_actual;
- }
-
- if (tree_info == *treep) {
- /* data fit in-line; nothing to deallocate */
-
- *treeCntp = tree_actual;
- } else if (tree_actual == 0) {
- kmem_free(ipc_kernel_map, tree_addr, tree_size);
-
- *treeCntp = 0;
- } else {
- vm_size_t size_used, rsize_used;
- vm_map_copy_t copy;
-
- /* kmem_alloc doesn't zero memory */
-
- size_used = tree_actual * sizeof *tree_info;
- rsize_used = round_page(size_used);
-
- if (rsize_used != tree_size)
- kmem_free(ipc_kernel_map,
- tree_addr + rsize_used,
- tree_size - rsize_used);
-
- if (size_used != rsize_used)
- memset((char *) (tree_addr + size_used), 0,
- rsize_used - size_used);
-
- kr = vm_map_copyin(ipc_kernel_map, tree_addr, rsize_used,
- TRUE, &copy);
-
- assert(kr == KERN_SUCCESS);
-
- *treep = (ipc_info_tree_name_t *) copy;
- *treeCntp = tree_actual;
- }
-
- return KERN_SUCCESS;
-}
-
-/*
* Routine: mach_port_dnrequest_info
* Purpose:
* Returns information about the dead-name requests
@@ -603,8 +278,8 @@ mach_port_kernel_object(
return KERN_INVALID_RIGHT;
}
- *typep = (unsigned int) ip_kotype(port);
- *addrp = (vm_offset_t) port->ip_kobject;
+ *typep = ip_kotype(port);
+ *addrp = port->ip_kobject;
ip_unlock(port);
return KERN_SUCCESS;
}
diff --git a/ipc/mach_msg.c b/ipc/mach_msg.c
index 00ab085b..fe0c43e3 100644
--- a/ipc/mach_msg.c
+++ b/ipc/mach_msg.c
@@ -46,6 +46,7 @@
#include <kern/printf.h>
#include <kern/sched_prim.h>
#include <kern/ipc_sched.h>
+#include <kern/exception.h>
#include <vm/vm_map.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_marequest.h>
@@ -61,9 +62,6 @@
#include <machine/locore.h>
#include <machine/pcb.h>
-extern void exception_raise_continue();
-extern void exception_raise_continue_fast();
-
/*
* Routine: mach_msg_send
* Purpose:
@@ -90,12 +88,12 @@ extern void exception_raise_continue_fast();
*/
mach_msg_return_t
-mach_msg_send(msg, option, send_size, time_out, notify)
- mach_msg_header_t *msg;
- mach_msg_option_t option;
- mach_msg_size_t send_size;
- mach_msg_timeout_t time_out;
- mach_port_t notify;
+mach_msg_send(
+ mach_msg_header_t *msg,
+ mach_msg_option_t option,
+ mach_msg_size_t send_size,
+ mach_msg_timeout_t time_out,
+ mach_port_t notify)
{
ipc_space_t space = current_space();
vm_map_t map = current_map();
@@ -172,13 +170,13 @@ mach_msg_send(msg, option, send_size, time_out, notify)
*/
mach_msg_return_t
-mach_msg_receive(msg, option, rcv_size, rcv_name, time_out, notify)
- mach_msg_header_t *msg;
- mach_msg_option_t option;
- mach_msg_size_t rcv_size;
- mach_port_t rcv_name;
- mach_msg_timeout_t time_out;
- mach_port_t notify;
+mach_msg_receive(
+ mach_msg_header_t *msg,
+ mach_msg_option_t option,
+ mach_msg_size_t rcv_size,
+ mach_port_t rcv_name,
+ mach_msg_timeout_t time_out,
+ mach_port_t notify)
{
ipc_thread_t self = current_thread();
ipc_space_t space = current_space();
@@ -381,26 +379,26 @@ mach_msg_receive_continue(void)
*/
mach_msg_return_t
-mach_msg_trap(msg, option, send_size, rcv_size, rcv_name, time_out, notify)
- mach_msg_header_t *msg;
- mach_msg_option_t option;
- mach_msg_size_t send_size;
- mach_msg_size_t rcv_size;
- mach_port_t rcv_name;
- mach_msg_timeout_t time_out;
- mach_port_t notify;
+mach_msg_trap(
+ mach_msg_header_t *msg,
+ mach_msg_option_t option,
+ mach_msg_size_t send_size,
+ mach_msg_size_t rcv_size,
+ mach_port_t rcv_name,
+ mach_msg_timeout_t time_out,
+ mach_port_t notify)
{
mach_msg_return_t mr;
/* first check for common cases */
if (option == (MACH_SEND_MSG|MACH_RCV_MSG)) {
- register ipc_thread_t self = current_thread();
+ ipc_thread_t self = current_thread();
ipc_space_t space = self->task->itk_space;
- register ipc_kmsg_t kmsg;
- register ipc_port_t dest_port;
+ ipc_kmsg_t kmsg;
+ ipc_port_t dest_port;
ipc_object_t rcv_object;
- register ipc_mqueue_t rcv_mqueue;
+ ipc_mqueue_t rcv_mqueue;
mach_msg_size_t reply_size;
/*
@@ -484,85 +482,38 @@ mach_msg_trap(msg, option, send_size, rcv_size, rcv_name, time_out, notify)
switch (kmsg->ikm_header.msgh_bits) {
case MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
MACH_MSG_TYPE_MAKE_SEND_ONCE): {
- register ipc_entry_t table;
- register ipc_entry_num_t size;
- register ipc_port_t reply_port;
-
- /* sending a request message */
-
+ ipc_port_t reply_port;
{
- register mach_port_index_t index;
- register mach_port_gen_t gen;
-
- {
- register mach_port_t reply_name =
+ mach_port_t reply_name =
kmsg->ikm_header.msgh_local_port;
if (reply_name != rcv_name)
goto slow_copyin;
- /* optimized ipc_entry_lookup of reply_name */
-
- index = MACH_PORT_INDEX(reply_name);
- gen = MACH_PORT_GEN(reply_name);
- }
-
is_read_lock(space);
assert(space->is_active);
- size = space->is_table_size;
- table = space->is_table;
-
- if (index >= size)
- goto abort_request_copyin;
-
- {
- register ipc_entry_t entry;
- register ipc_entry_bits_t bits;
-
- entry = &table[index];
- bits = entry->ie_bits;
-
- /* check generation number and type bit */
-
- if ((bits & (IE_BITS_GEN_MASK|
- MACH_PORT_TYPE_RECEIVE)) !=
- (gen | MACH_PORT_TYPE_RECEIVE))
+ ipc_entry_t entry;
+ entry = ipc_entry_lookup (space, reply_name);
+ if (entry == IE_NULL)
goto abort_request_copyin;
-
reply_port = (ipc_port_t) entry->ie_object;
assert(reply_port != IP_NULL);
}
- }
-
- /* optimized ipc_entry_lookup of dest_name */
-
- {
- register mach_port_index_t index;
- register mach_port_gen_t gen;
{
- register mach_port_t dest_name =
+ mach_port_t dest_name =
kmsg->ikm_header.msgh_remote_port;
- index = MACH_PORT_INDEX(dest_name);
- gen = MACH_PORT_GEN(dest_name);
- }
-
- if (index >= size)
+ ipc_entry_t entry;
+ ipc_entry_bits_t bits;
+ entry = ipc_entry_lookup (space, dest_name);
+ if (entry == IE_NULL)
goto abort_request_copyin;
-
- {
- register ipc_entry_t entry;
- register ipc_entry_bits_t bits;
-
- entry = &table[index];
bits = entry->ie_bits;
- /* check generation number and type bit */
-
- if ((bits & (IE_BITS_GEN_MASK|MACH_PORT_TYPE_SEND)) !=
- (gen | MACH_PORT_TYPE_SEND))
+ /* check type bits */
+ if (IE_BITS_TYPE (bits) != MACH_PORT_TYPE_SEND)
goto abort_request_copyin;
assert(IE_BITS_UREFS(bits) > 0);
@@ -570,7 +521,6 @@ mach_msg_trap(msg, option, send_size, rcv_size, rcv_name, time_out, notify)
dest_port = (ipc_port_t) entry->ie_object;
assert(dest_port != IP_NULL);
}
- }
/*
* To do an atomic copyin, need simultaneous
@@ -651,13 +601,10 @@ mach_msg_trap(msg, option, send_size, rcv_size, rcv_name, time_out, notify)
}
case MACH_MSGH_BITS(MACH_MSG_TYPE_MOVE_SEND_ONCE, 0): {
- register ipc_entry_num_t size;
- register ipc_entry_t table;
-
/* sending a reply message */
{
- register mach_port_t reply_name =
+ mach_port_t reply_name =
kmsg->ikm_header.msgh_local_port;
if (reply_name != MACH_PORT_NULL)
@@ -667,35 +614,18 @@ mach_msg_trap(msg, option, send_size, rcv_size, rcv_name, time_out, notify)
is_write_lock(space);
assert(space->is_active);
- /* optimized ipc_entry_lookup */
-
- size = space->is_table_size;
- table = space->is_table;
-
- {
- register ipc_entry_t entry;
- register mach_port_gen_t gen;
- register mach_port_index_t index;
-
{
- register mach_port_t dest_name =
+ ipc_entry_t entry;
+ mach_port_t dest_name =
kmsg->ikm_header.msgh_remote_port;
- index = MACH_PORT_INDEX(dest_name);
- gen = MACH_PORT_GEN(dest_name);
- }
-
- if (index >= size)
+ entry = ipc_entry_lookup (space, dest_name);
+ if (entry == IE_NULL)
goto abort_reply_dest_copyin;
- entry = &table[index];
-
- /* check generation, collision bit, and type bit */
-
- if ((entry->ie_bits & (IE_BITS_GEN_MASK|
- IE_BITS_COLLISION|
- MACH_PORT_TYPE_SEND_ONCE)) !=
- (gen | MACH_PORT_TYPE_SEND_ONCE))
+ /* check type bits */
+ if (IE_BITS_TYPE (entry->ie_bits) !=
+ MACH_PORT_TYPE_SEND_ONCE)
goto abort_reply_dest_copyin;
/* optimized ipc_right_copyin */
@@ -718,13 +648,8 @@ mach_msg_trap(msg, option, send_size, rcv_size, rcv_name, time_out, notify)
}
assert(dest_port->ip_sorights > 0);
-
- /* optimized ipc_entry_dealloc */
-
- entry->ie_next = table->ie_next;
- table->ie_next = index;
- entry->ie_bits = gen;
entry->ie_object = IO_NULL;
+ ipc_entry_dealloc (space, dest_name, entry);
}
kmsg->ikm_header.msgh_bits =
@@ -737,35 +662,20 @@ mach_msg_trap(msg, option, send_size, rcv_size, rcv_name, time_out, notify)
assert(dest_port->ip_receiver != ipc_space_kernel);
- /* optimized ipc_entry_lookup/ipc_mqueue_copyin */
-
- {
- register ipc_entry_t entry;
- register ipc_entry_bits_t bits;
+ /* optimized ipc_mqueue_copyin */
{
- register mach_port_index_t index;
- register mach_port_gen_t gen;
-
- index = MACH_PORT_INDEX(rcv_name);
- gen = MACH_PORT_GEN(rcv_name);
-
- if (index >= size)
+ ipc_entry_t entry;
+ ipc_entry_bits_t bits;
+ entry = ipc_entry_lookup (space, rcv_name);
+ if (entry == IE_NULL)
goto abort_reply_rcv_copyin;
-
- entry = &table[index];
bits = entry->ie_bits;
- /* check generation number */
-
- if ((bits & IE_BITS_GEN_MASK) != gen)
- goto abort_reply_rcv_copyin;
- }
-
/* check type bits; looking for receive or set */
if (bits & MACH_PORT_TYPE_PORT_SET) {
- register ipc_pset_t rcv_pset;
+ ipc_pset_t rcv_pset;
rcv_pset = (ipc_pset_t) entry->ie_object;
assert(rcv_pset != IPS_NULL);
@@ -776,7 +686,7 @@ mach_msg_trap(msg, option, send_size, rcv_size, rcv_name, time_out, notify)
rcv_object = (ipc_object_t) rcv_pset;
rcv_mqueue = &rcv_pset->ips_messages;
} else if (bits & MACH_PORT_TYPE_RECEIVE) {
- register ipc_port_t rcv_port;
+ ipc_port_t rcv_port;
rcv_port = (ipc_port_t) entry->ie_object;
assert(rcv_port != IP_NULL);
@@ -841,11 +751,11 @@ mach_msg_trap(msg, option, send_size, rcv_size, rcv_name, time_out, notify)
MACH_MSGH_BITS_CIRCULAR) == 0);
{
- register ipc_mqueue_t dest_mqueue;
- register ipc_thread_t receiver;
+ ipc_mqueue_t dest_mqueue;
+ ipc_thread_t receiver;
{
- register ipc_pset_t dest_pset;
+ ipc_pset_t dest_pset;
dest_pset = dest_port->ip_pset;
if (dest_pset == IPS_NULL)
@@ -1043,6 +953,7 @@ mach_msg_trap(msg, option, send_size, rcv_size, rcv_name, time_out, notify)
ipc_port_t reply_port =
(ipc_port_t) kmsg->ikm_header.msgh_local_port;
mach_port_t dest_name, reply_name;
+ unsigned long payload;
/* receiving a request message */
@@ -1074,30 +985,19 @@ mach_msg_trap(msg, option, send_size, rcv_size, rcv_name, time_out, notify)
ip_unlock(reply_port);
{
- register ipc_entry_t table;
- register ipc_entry_t entry;
- register mach_port_index_t index;
-
- /* optimized ipc_entry_get */
-
- table = space->is_table;
- index = table->ie_next;
-
- if (index == 0)
+ ipc_entry_t entry;
+ kern_return_t kr;
+ kr = ipc_entry_get (space, &reply_name, &entry);
+ if (kr)
goto abort_request_copyout;
-
- entry = &table[index];
- table->ie_next = entry->ie_next;
- entry->ie_request = 0;
+ assert (entry != NULL);
{
- register mach_port_gen_t gen;
+ mach_port_gen_t gen;
assert((entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
gen = entry->ie_bits + IE_BITS_GEN_ONE;
- reply_name = MACH_PORT_MAKE(index, gen);
-
/* optimized ipc_right_copyout */
entry->ie_bits = gen | (MACH_PORT_TYPE_SEND_ONCE | 1);
@@ -1117,6 +1017,7 @@ mach_msg_trap(msg, option, send_size, rcv_size, rcv_name, time_out, notify)
dest_name = dest_port->ip_receiver_name;
else
dest_name = MACH_PORT_NULL;
+ payload = dest_port->ip_protected_payload;
if ((--dest_port->ip_srights == 0) &&
(dest_port->ip_nsrequest != IP_NULL)) {
@@ -1134,11 +1035,19 @@ mach_msg_trap(msg, option, send_size, rcv_size, rcv_name, time_out, notify)
} else
ip_unlock(dest_port);
- kmsg->ikm_header.msgh_bits =
- MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE,
- MACH_MSG_TYPE_PORT_SEND);
+ if (! ipc_port_flag_protected_payload(dest_port)) {
+ kmsg->ikm_header.msgh_bits = MACH_MSGH_BITS(
+ MACH_MSG_TYPE_PORT_SEND_ONCE,
+ MACH_MSG_TYPE_PORT_SEND);
+ kmsg->ikm_header.msgh_local_port = dest_name;
+ } else {
+ kmsg->ikm_header.msgh_bits = MACH_MSGH_BITS(
+ MACH_MSG_TYPE_PORT_SEND_ONCE,
+ MACH_MSG_TYPE_PROTECTED_PAYLOAD);
+ kmsg->ikm_header.msgh_protected_payload =
+ payload;
+ }
kmsg->ikm_header.msgh_remote_port = reply_name;
- kmsg->ikm_header.msgh_local_port = dest_name;
goto fast_put;
abort_request_copyout:
@@ -1148,7 +1057,8 @@ mach_msg_trap(msg, option, send_size, rcv_size, rcv_name, time_out, notify)
}
case MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
- register mach_port_t dest_name;
+ mach_port_t dest_name;
+ unsigned long payload;
/* receiving a reply message */
@@ -1160,6 +1070,8 @@ mach_msg_trap(msg, option, send_size, rcv_size, rcv_name, time_out, notify)
assert(dest_port->ip_sorights > 0);
+ payload = dest_port->ip_protected_payload;
+
if (dest_port->ip_receiver == space) {
ip_release(dest_port);
dest_port->ip_sorights--;
@@ -1172,17 +1084,26 @@ mach_msg_trap(msg, option, send_size, rcv_size, rcv_name, time_out, notify)
dest_name = MACH_PORT_NULL;
}
- kmsg->ikm_header.msgh_bits =
- MACH_MSGH_BITS(0,
- MACH_MSG_TYPE_PORT_SEND_ONCE);
+ if (! ipc_port_flag_protected_payload(dest_port)) {
+ kmsg->ikm_header.msgh_bits = MACH_MSGH_BITS(
+ 0,
+ MACH_MSG_TYPE_PORT_SEND_ONCE);
+ kmsg->ikm_header.msgh_local_port = dest_name;
+ } else {
+ kmsg->ikm_header.msgh_bits = MACH_MSGH_BITS(
+ 0,
+ MACH_MSG_TYPE_PROTECTED_PAYLOAD);
+ kmsg->ikm_header.msgh_protected_payload =
+ payload;
+ }
kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;
- kmsg->ikm_header.msgh_local_port = dest_name;
goto fast_put;
}
case MACH_MSGH_BITS_COMPLEX|
MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0): {
- register mach_port_t dest_name;
+ mach_port_t dest_name;
+ unsigned long payload;
/* receiving a complex reply message */
@@ -1194,6 +1115,8 @@ mach_msg_trap(msg, option, send_size, rcv_size, rcv_name, time_out, notify)
assert(dest_port->ip_sorights > 0);
+ payload = dest_port->ip_protected_payload;
+
if (dest_port->ip_receiver == space) {
ip_release(dest_port);
dest_port->ip_sorights--;
@@ -1206,12 +1129,23 @@ mach_msg_trap(msg, option, send_size, rcv_size, rcv_name, time_out, notify)
dest_name = MACH_PORT_NULL;
}
- kmsg->ikm_header.msgh_bits =
- MACH_MSGH_BITS_COMPLEX |
- MACH_MSGH_BITS(0,
- MACH_MSG_TYPE_PORT_SEND_ONCE);
+ if (! ipc_port_flag_protected_payload(dest_port)) {
+ kmsg->ikm_header.msgh_bits =
+ MACH_MSGH_BITS_COMPLEX
+ | MACH_MSGH_BITS(
+ 0,
+ MACH_MSG_TYPE_PORT_SEND_ONCE);
+ kmsg->ikm_header.msgh_local_port = dest_name;
+ } else {
+ kmsg->ikm_header.msgh_bits =
+ MACH_MSGH_BITS_COMPLEX
+ | MACH_MSGH_BITS(
+ 0,
+ MACH_MSG_TYPE_PROTECTED_PAYLOAD);
+ kmsg->ikm_header.msgh_protected_payload =
+ payload;
+ }
kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;
- kmsg->ikm_header.msgh_local_port = dest_name;
mr = ipc_kmsg_copyout_body(
(vm_offset_t) (&kmsg->ikm_header + 1),
@@ -1322,7 +1256,7 @@ mach_msg_trap(msg, option, send_size, rcv_size, rcv_name, time_out, notify)
* It will work if this is a request message.
*/
- register ipc_port_t reply_port;
+ ipc_port_t reply_port;
reply_port = (ipc_port_t)
kmsg->ikm_header.msgh_local_port;
@@ -1357,7 +1291,7 @@ mach_msg_trap(msg, option, send_size, rcv_size, rcv_name, time_out, notify)
*/
{
- register ipc_port_t reply_port;
+ ipc_port_t reply_port;
/*
* Perform the kernel function.
@@ -1735,8 +1669,7 @@ mach_msg_continue(void)
*/
boolean_t
-mach_msg_interrupt(thread)
- thread_t thread;
+mach_msg_interrupt(thread_t thread)
{
ipc_mqueue_t mqueue;
diff --git a/ipc/mach_port.c b/ipc/mach_port.c
index 46cb4de4..93a1248f 100644
--- a/ipc/mach_port.c
+++ b/ipc/mach_port.c
@@ -150,10 +150,6 @@ mach_port_names(
mach_port_type_t **typesp,
mach_msg_type_number_t *typesCnt)
{
- ipc_tree_entry_t tentry;
- ipc_entry_t table;
- ipc_entry_num_t tsize;
- mach_port_index_t index;
ipc_entry_num_t actual; /* this many names */
ipc_port_timestamp_t timestamp; /* logical time of this operation */
mach_port_t *names;
@@ -190,7 +186,7 @@ mach_port_names(
/* upper bound on number of names in the space */
- bound = space->is_table_size + space->is_tree_total;
+ bound = space->is_size;
size_needed = round_page(bound * sizeof(mach_port_t));
if (size_needed <= size)
@@ -235,33 +231,16 @@ mach_port_names(
timestamp = ipc_port_timestamp();
- table = space->is_table;
- tsize = space->is_table_size;
-
- for (index = 0; index < tsize; index++) {
- ipc_entry_t entry = &table[index];
+ ipc_entry_t entry;
+ struct rdxtree_iter iter;
+ rdxtree_for_each(&space->is_map, &iter, entry) {
ipc_entry_bits_t bits = entry->ie_bits;
if (IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE) {
- mach_port_t name = MACH_PORT_MAKEB(index, bits);
-
- mach_port_names_helper(timestamp, entry, name,
+ mach_port_names_helper(timestamp, entry, entry->ie_name,
names, types, &actual);
}
}
-
- for (tentry = ipc_splay_traverse_start(&space->is_tree);
- tentry != ITE_NULL;
- tentry = ipc_splay_traverse_next(&space->is_tree, FALSE)) {
- ipc_entry_t entry = &tentry->ite_entry;
- mach_port_t name = tentry->ite_name;
-
- assert(IE_BITS_TYPE(tentry->ite_bits) != MACH_PORT_TYPE_NONE);
-
- mach_port_names_helper(timestamp, entry, name,
- names, types, &actual);
- }
- ipc_splay_traverse_finish(&space->is_tree);
is_read_unlock(space);
if (actual == 0) {
@@ -434,10 +413,10 @@ mach_port_rename(
*/
kern_return_t
-mach_port_allocate_name(space, right, name)
- ipc_space_t space;
- mach_port_right_t right;
- mach_port_t name;
+mach_port_allocate_name(
+ ipc_space_t space,
+ mach_port_right_t right,
+ mach_port_t name)
{
kern_return_t kr;
@@ -497,10 +476,10 @@ mach_port_allocate_name(space, right, name)
*/
kern_return_t
-mach_port_allocate(space, right, namep)
- ipc_space_t space;
- mach_port_right_t right;
- mach_port_t *namep;
+mach_port_allocate(
+ ipc_space_t space,
+ mach_port_right_t right,
+ mach_port_t *namep)
{
kern_return_t kr;
@@ -555,7 +534,7 @@ mach_port_allocate(space, right, namep)
* KERN_INVALID_NAME The name doesn't denote a right.
*/
-static volatile int mach_port_deallocate_debug = 0;
+static volatile boolean_t mach_port_deallocate_debug = FALSE;
kern_return_t
mach_port_destroy(
@@ -570,8 +549,8 @@ mach_port_destroy(
kr = ipc_right_lookup_write(space, name, &entry);
if (kr != KERN_SUCCESS) {
- if (name != MACH_PORT_NULL && name != MACH_PORT_DEAD && space == current_space()) {
- printf("task %p destroying an invalid port %lu, most probably a bug.\n", current_task(), name);
+ if (MACH_PORT_VALID (name) && space == current_space()) {
+ printf("task %.*s destroying a bogus port %lu, most probably a bug.\n", sizeof current_task()->name, current_task()->name, name);
if (mach_port_deallocate_debug)
SoftDebugger("mach_port_deallocate");
}
@@ -614,8 +593,8 @@ mach_port_deallocate(
kr = ipc_right_lookup_write(space, name, &entry);
if (kr != KERN_SUCCESS) {
- if (name != MACH_PORT_NULL && name != MACH_PORT_DEAD && space == current_space()) {
- printf("task %p deallocating an invalid port %lu, most probably a bug.\n", current_task(), name);
+ if (MACH_PORT_VALID (name) && space == current_space()) {
+ printf("task %.*s deallocating a bogus port %lu, most probably a bug.\n", sizeof current_task()->name, current_task()->name, name);
if (mach_port_deallocate_debug)
SoftDebugger("mach_port_deallocate");
}
@@ -735,8 +714,19 @@ mach_port_mod_refs(
return KERN_INVALID_VALUE;
kr = ipc_right_lookup_write(space, name, &entry);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
+ if (MACH_PORT_VALID (name) && space == current_space()) {
+ printf("task %.*s %screasing a bogus port "
+ "%lu by %d, most probably a bug.\n",
+ sizeof current_task()->name,
+ current_task()->name,
+ delta < 0 ? "de" : "in", name,
+ delta < 0 ? -delta : delta);
+ if (mach_port_deallocate_debug)
+ SoftDebugger("mach_port_mod_refs");
+ }
return kr;
+ }
/* space is write-locked and active */
kr = ipc_right_delta(space, name, entry, right, delta); /* unlocks */
@@ -744,48 +734,6 @@ mach_port_mod_refs(
}
/*
- * Routine: old_mach_port_get_receive_status [kernel call]
- * Purpose:
- * Compatibility for code written before sequence numbers.
- * Retrieves mucho info about a receive right.
- * Conditions:
- * Nothing locked.
- * Returns:
- * KERN_SUCCESS Retrieved status.
- * KERN_INVALID_TASK The space is null.
- * KERN_INVALID_TASK The space is dead.
- * KERN_INVALID_NAME The name doesn't denote a right.
- * KERN_INVALID_RIGHT Name doesn't denote receive rights.
- */
-
-kern_return_t
-mach_port_get_receive_status(ipc_space_t, mach_port_t, mach_port_status_t *);
-kern_return_t
-old_mach_port_get_receive_status(space, name, statusp)
- ipc_space_t space;
- mach_port_t name;
- old_mach_port_status_t *statusp;
-{
- mach_port_status_t status;
- kern_return_t kr;
-
- kr = mach_port_get_receive_status(space, name, &status);
- if (kr != KERN_SUCCESS)
- return kr;
-
- statusp->mps_pset = status.mps_pset;
- statusp->mps_mscount = status.mps_mscount;
- statusp->mps_qlimit = status.mps_qlimit;
- statusp->mps_msgcount = status.mps_msgcount;
- statusp->mps_sorights = status.mps_sorights;
- statusp->mps_srights = status.mps_srights;
- statusp->mps_pdrequest = status.mps_pdrequest;
- statusp->mps_nsrequest = status.mps_nsrequest;
-
- return KERN_SUCCESS;
-}
-
-/*
* Routine: mach_port_set_qlimit [kernel call]
* Purpose:
* Changes a receive right's queue limit.
@@ -803,10 +751,10 @@ old_mach_port_get_receive_status(space, name, statusp)
*/
kern_return_t
-mach_port_set_qlimit(space, name, qlimit)
- ipc_space_t space;
- mach_port_t name;
- mach_port_msgcount_t qlimit;
+mach_port_set_qlimit(
+ ipc_space_t space,
+ mach_port_t name,
+ mach_port_msgcount_t qlimit)
{
ipc_port_t port;
kern_return_t kr;
@@ -977,10 +925,7 @@ mach_port_get_set_status(
size = PAGE_SIZE; /* initial guess */
for (;;) {
- ipc_tree_entry_t tentry;
- ipc_entry_t entry, table;
- ipc_entry_num_t tsize;
- mach_port_index_t index;
+ ipc_entry_t entry;
mach_port_t *names;
ipc_pset_t pset;
@@ -1017,11 +962,9 @@ mach_port_get_set_status(
maxnames = size / sizeof(mach_port_t);
actual = 0;
- table = space->is_table;
- tsize = space->is_table_size;
-
- for (index = 0; index < tsize; index++) {
- ipc_entry_t ientry = &table[index];
+ ipc_entry_t ientry;
+ struct rdxtree_iter iter;
+ rdxtree_for_each(&space->is_map, &iter, ientry) {
ipc_entry_bits_t bits = ientry->ie_bits;
if (bits & MACH_PORT_TYPE_RECEIVE) {
@@ -1033,22 +976,6 @@ mach_port_get_set_status(
}
}
- for (tentry = ipc_splay_traverse_start(&space->is_tree);
- tentry != ITE_NULL;
- tentry = ipc_splay_traverse_next(&space->is_tree,FALSE)) {
- ipc_entry_bits_t bits = tentry->ite_bits;
-
- assert(IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE);
-
- if (bits & MACH_PORT_TYPE_RECEIVE) {
- ipc_port_t port =
- (ipc_port_t) tentry->ite_object;
-
- mach_port_gst_helper(pset, port, maxnames,
- names, &actual);
- }
- }
- ipc_splay_traverse_finish(&space->is_tree);
is_read_unlock(space);
if (actual <= maxnames)
@@ -1367,10 +1294,10 @@ mach_port_extract_right(
*/
kern_return_t
-mach_port_get_receive_status(space, name, statusp)
- ipc_space_t space;
- mach_port_t name;
- mach_port_status_t *statusp;
+mach_port_get_receive_status(
+ ipc_space_t space,
+ mach_port_t name,
+ mach_port_status_t *statusp)
{
ipc_port_t port;
kern_return_t kr;
@@ -1421,11 +1348,11 @@ mach_port_get_receive_status(space, name, statusp)
#ifdef MIGRATING_THREADS
kern_return_t
-mach_port_set_rpcinfo(space, name, rpc_info, rpc_info_count)
- ipc_space_t space;
- mach_port_t name;
- void *rpc_info;
- unsigned int rpc_info_count;
+mach_port_set_rpcinfo(
+ ipc_space_t space,
+ mach_port_t name,
+ void *rpc_info,
+ unsigned int rpc_info_count)
{
ipc_target_t target;
ipc_object_t object;
@@ -1459,19 +1386,19 @@ mach_port_set_rpcinfo(space, name, rpc_info, rpc_info_count)
int sacts, maxsacts;
#endif
-sact_count()
+void sact_count(void)
{
printf("%d server activations in use, %d max\n", sacts, maxsacts);
}
kern_return_t
-mach_port_create_act(task, name, user_stack, user_rbuf, user_rbuf_size, out_act)
- task_t task;
- mach_port_t name;
- vm_offset_t user_stack;
- vm_offset_t user_rbuf;
- vm_size_t user_rbuf_size;
- Act **out_act;
+mach_port_create_act(
+ task_t task,
+ mach_port_t name,
+ vm_offset_t user_stack,
+ vm_offset_t user_rbuf,
+ vm_size_t user_rbuf_size,
+ Act **out_act)
{
ipc_target_t target;
ipc_space_t space;
@@ -1538,9 +1465,9 @@ mach_port_create_act(task, name, user_stack, user_rbuf, user_rbuf_size, out_act)
#ifdef RPCKERNELSIG
kern_return_t
-mach_port_set_syscall_right(task, name)
- task_t task;
- mach_port_t name;
+mach_port_set_syscall_right(
+ task_t task,
+ mach_port_t name)
{
ipc_entry_t entry;
kern_return_t kr;
@@ -1566,3 +1493,76 @@ mach_port_set_syscall_right(task, name)
}
#endif
#endif /* MIGRATING_THREADS */
+
+/*
+ * Routine: mach_port_set_protected_payload [kernel call]
+ * Purpose:
+ * Changes a receive right's protected payload.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Set protected payload.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote receive rights.
+ */
+
+kern_return_t
+mach_port_set_protected_payload(
+ ipc_space_t space,
+ mach_port_t name,
+ unsigned long payload)
+{
+ ipc_port_t port;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ kr = ipc_port_translate_receive(space, name, &port);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* port is locked and active */
+
+ ipc_port_set_protected_payload(port, payload);
+
+ ip_unlock(port);
+ return KERN_SUCCESS;
+}
+
+/*
+ * Routine: mach_port_clear_protected_payload [kernel call]
+ * Purpose:
+ * Clears a receive right's protected payload.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Clear protected payload.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT Name doesn't denote receive rights.
+ */
+
+kern_return_t
+mach_port_clear_protected_payload(
+ ipc_space_t space,
+ mach_port_t name)
+{
+ ipc_port_t port;
+ kern_return_t kr;
+
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+ kr = ipc_port_translate_receive(space, name, &port);
+ if (kr != KERN_SUCCESS)
+ return kr;
+ /* port is locked and active */
+
+ ipc_port_clear_protected_payload(port);
+
+ ip_unlock(port);
+ return KERN_SUCCESS;
+}
diff --git a/ipc/mach_port.h b/ipc/mach_port.h
index a82228fe..c4d9a1c3 100644
--- a/ipc/mach_port.h
+++ b/ipc/mach_port.h
@@ -43,6 +43,11 @@ mach_port_allocate (
mach_port_t *namep);
extern kern_return_t
+mach_port_destroy(
+ ipc_space_t space,
+ mach_port_t name);
+
+extern kern_return_t
mach_port_deallocate(
ipc_space_t space,
mach_port_t name);
@@ -54,4 +59,10 @@ mach_port_insert_right(
ipc_port_t poly,
mach_msg_type_name_t polyPoly);
+kern_return_t
+mach_port_get_receive_status(
+ ipc_space_t space,
+ mach_port_t name,
+ mach_port_status_t *statusp);
+
#endif /* _IPC_MACH_PORT_H_ */
diff --git a/ipc/mach_rpc.c b/ipc/mach_rpc.c
index 7f5b2eb2..6ca46cc9 100644
--- a/ipc/mach_rpc.c
+++ b/ipc/mach_rpc.c
@@ -58,9 +58,10 @@
* info to the other side.
*/
kern_return_t
-mach_port_rpc_copy(portp, sact, dact)
- struct rpc_port_desc *portp;
- struct Act *sact, *dact;
+mach_port_rpc_copy(
+ struct rpc_port_desc *portp,
+ struct Act *sact,
+ struct Act *dact)
{
ipc_space_t sspace, dspace;
mach_msg_type_name_t tname;
@@ -141,7 +142,7 @@ mach_port_rpc_copy(portp, sact, dact)
}
kern_return_t
-mach_port_rpc_sig(space, name, buffer, buflen)
+mach_port_rpc_sig(const ipc_space_t space, const char *name, const char *buffer, unsigned int buflen)
{
return KERN_FAILURE;
}
diff --git a/ipc/notify.defs b/ipc/notify.defs
new file mode 100644
index 00000000..db059b8d
--- /dev/null
+++ b/ipc/notify.defs
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2015 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* We use custom functions to send notifications. These functions can
+ be found in `ipc_notify.c'. We use this file merely to produce the
+ list of message ids. */
+
+#include <mach/notify.defs>
diff --git a/ipc/port.h b/ipc/port.h
index 6e9f77b4..49af6e2c 100644
--- a/ipc/port.h
+++ b/ipc/port.h
@@ -29,7 +29,7 @@
/*
*/
/*
- * File: ipc/ipc_port.h
+ * File: ipc/port.h
* Author: Rich Draves
* Date: 1989
*
@@ -45,10 +45,7 @@
* mach_port_t must be an unsigned type. Port values
* have two parts, a generation number and an index.
* These macros encapsulate all knowledge of how
- * a mach_port_t is layed out. However, ipc/ipc_entry.c
- * implicitly assumes when it uses the splay tree functions
- * that the generation number is in the low bits, so that
- * names are ordered first by index and then by generation.
+ * a mach_port_t is laid out.
*
* If the size of generation numbers changes,
* be sure to update IE_BITS_GEN_MASK and friends
diff --git a/kern/act.c b/kern/act.c
index 4c3839c6..3819ef32 100644
--- a/kern/act.c
+++ b/kern/act.c
@@ -64,11 +64,11 @@ static Act free_acts[ACT_STATIC_KLUDGE];
Act null_act;
void
-global_act_init()
+global_act_init(void)
{
#ifndef ACT_STATIC_KLUDGE
kmem_cache_init(&act_cache, "Act", sizeof(struct Act), 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
#else
int i;
@@ -257,7 +257,7 @@ void act_detach(Act *cur_act)
so RPC entry paths need not check it.
Locking: Act */
-void act_execute_returnhandlers()
+void act_execute_returnhandlers(void)
{
Act *act = current_act();
@@ -1013,11 +1013,11 @@ act_set_special_port(Act *act, int which, ipc_port_t port)
* Return thread's machine-dependent state.
*/
kern_return_t
-act_get_state_immediate(act, flavor, old_state, old_state_count)
- register Act *act;
- int flavor;
- void *old_state; /* pointer to OUT array */
- unsigned int *old_state_count; /*IN/OUT*/
+act_get_state_immediate(
+ Act *act,
+ int flavor,
+ void *old_state, /* pointer to OUT array */
+ unsigned int *old_state_count) /*IN/OUT*/
{
kern_return_t ret;
@@ -1039,11 +1039,11 @@ act_get_state_immediate(act, flavor, old_state, old_state_count)
* Change thread's machine-dependent state.
*/
kern_return_t
-act_set_state_immediate(act, flavor, new_state, new_state_count)
- register Act *act;
- int flavor;
- void *new_state;
- unsigned int new_state_count;
+act_set_state_immediate(
+ Act *act,
+ int flavor,
+ void *new_state,
+ unsigned int new_state_count)
{
kern_return_t ret;
@@ -1061,7 +1061,7 @@ act_set_state_immediate(act, flavor, new_state, new_state_count)
return act_set_state(act, flavor, new_state, new_state_count);
}
-void act_count()
+void act_count(void)
{
int i;
Act *act;
@@ -1076,7 +1076,7 @@ void act_count()
ACT_STATIC_KLUDGE-i, ACT_STATIC_KLUDGE, ACT_STATIC_KLUDGE-amin);
}
-dump_act(act)
+void dump_act(act)
Act *act;
{
act_count();
@@ -1097,8 +1097,7 @@ dump_act(act)
#ifdef ACTWATCH
Act *
-get_next_act(sp)
- int sp;
+get_next_act(int sp)
{
static int i;
Act *act;
@@ -1114,6 +1113,6 @@ get_next_act(sp)
return act;
}
}
-#endif
+#endif /* ACTWATCH */
#endif /* MIGRATING_THREADS */
diff --git a/kern/act.h b/kern/act.h
index e0647244..f46f53a3 100644
--- a/kern/act.h
+++ b/kern/act.h
@@ -40,8 +40,6 @@
#include <kern/refcount.h>
#include <kern/queue.h>
-#include "act.h"/*XXX*/
-
struct task;
struct thread;
struct Act;
@@ -176,7 +174,6 @@ kern_return_t act_terminate_task_locked(struct Act *act);
/* Exported to thread.c */
extern Act null_act;
-kern_return_t act_create_kernel(Act **out_act);
/* Exported to machine-dependent activation code */
void act_execute_returnhandlers(void);
@@ -192,4 +189,4 @@ kern_return_t act_machine_get_state(Act *inc, int flavor, int *tstate, unsigned
#endif /* MIGRATING_THREADS */
-#endif _KERN_ACT_H_
+#endif /* _KERN_ACT_H_ */
diff --git a/kern/assert.h b/kern/assert.h
index 2829728b..7b66d1b1 100644
--- a/kern/assert.h
+++ b/kern/assert.h
@@ -29,26 +29,21 @@
/* assert.h 4.2 85/01/21 */
-#include <kern/macro_help.h>
+#include <kern/macros.h>
#ifndef NDEBUG
#define MACH_ASSERT 1
#endif
#if MACH_ASSERT
-extern void Assert(char *exp, char *filename, int line) __attribute__ ((noreturn));
+extern void Assert(const char *exp, const char *filename, int line) __attribute__ ((noreturn));
#define assert(ex) \
-MACRO_BEGIN \
- if (!(ex)) \
- Assert(#ex, __FILE__, __LINE__); \
-MACRO_END
-
-#ifdef lint
-#define assert_static(x)
-#else /* lint */
+ ((ex) \
+ ? (void) (0) \
+ : Assert (#ex, __FILE__, __LINE__))
+
#define assert_static(x) assert(x)
-#endif /* lint */
#else /* MACH_ASSERT */
#define assert(ex)
diff --git a/kern/ast.c b/kern/ast.c
index 97da3abc..2772ed3e 100644
--- a/kern/ast.c
+++ b/kern/ast.c
@@ -56,10 +56,10 @@
volatile ast_t need_ast[NCPUS];
void
-ast_init()
+ast_init(void)
{
#ifndef MACHINE_AST
- register int i;
+ int i;
for (i=0; i<NCPUS; i++)
need_ast[i] = 0;
@@ -69,8 +69,8 @@ ast_init()
void
ast_taken(void)
{
- register thread_t self = current_thread();
- register ast_t reasons;
+ thread_t self = current_thread();
+ ast_t reasons;
/*
* Interrupts are still disabled.
@@ -96,7 +96,7 @@ ast_taken(void)
if (self != current_processor()->idle_thread) {
#ifndef MIGRATING_THREADS
while (thread_should_halt(self))
- thread_halt_self();
+ thread_halt_self(thread_exception_return);
#endif
/*
@@ -114,12 +114,12 @@ ast_taken(void)
}
void
-ast_check()
+ast_check(void)
{
- register int mycpu = cpu_number();
- register processor_t myprocessor;
- register thread_t thread = current_thread();
- register run_queue_t rq;
+ int mycpu = cpu_number();
+ processor_t myprocessor;
+ thread_t thread = current_thread();
+ run_queue_t rq;
spl_t s = splsched();
/*
@@ -190,7 +190,7 @@ ast_check()
#endif /* MACH_FIXPRI */
rq = &(myprocessor->processor_set->runq);
if (!(myprocessor->first_quantum) && (rq->count > 0)) {
- register queue_t q;
+ queue_t q;
/*
* This is not the first quantum, and there may
* be something in the processor_set runq.
@@ -198,7 +198,7 @@ ast_check()
*/
q = rq->runq + *(volatile int *)&rq->low;
if (queue_empty(q)) {
- register int i;
+ int i;
/*
* Need to recheck and possibly update hint.
diff --git a/kern/ast.h b/kern/ast.h
index 4c28b1e6..7d472be9 100644
--- a/kern/ast.h
+++ b/kern/ast.h
@@ -41,7 +41,7 @@
*/
#include "cpu_number.h"
-#include <kern/macro_help.h>
+#include <kern/macros.h>
#include <machine/ast.h>
/*
diff --git a/kern/boot_script.c b/kern/boot_script.c
index b2e9393b..b245d1d8 100644
--- a/kern/boot_script.c
+++ b/kern/boot_script.c
@@ -76,14 +76,14 @@ create_task (struct cmd *cmd, long *val)
/* Resume a task. */
static int
-resume_task (struct cmd *cmd, long *val)
+resume_task (struct cmd *cmd, const long *val)
{
return boot_script_task_resume (cmd);
}
/* Resume a task when the user hits return. */
static int
-prompt_resume_task (struct cmd *cmd, long *val)
+prompt_resume_task (struct cmd *cmd, const long *val)
{
return boot_script_prompt_task_resume (cmd);
}
@@ -485,7 +485,7 @@ boot_script_parse_line (void *hook, char *cmdline)
/* Execute commands previously parsed. */
int
-boot_script_exec ()
+boot_script_exec (void)
{
int cmd_index;
diff --git a/kern/boot_script.h b/kern/boot_script.h
index c5ad6732..c007d777 100644
--- a/kern/boot_script.h
+++ b/kern/boot_script.h
@@ -69,10 +69,6 @@ int boot_script_exec_cmd (void *hook,
task_t task, char *path, int argc,
char **argv, char *strings, int stringlen);
-/* The user must define this function. Load the contents of FILE
- into a fresh anonymous memory object and return the memory object port. */
-mach_port_t boot_script_read_file (const char *file);
-
/* The user must define this functions to perform the corresponding
Mach task manipulations. */
int boot_script_task_create (struct cmd *); /* task_create + task_suspend */
diff --git a/kern/bootstrap.c b/kern/bootstrap.c
index c98b0a2c..08362767 100644
--- a/kern/bootstrap.c
+++ b/kern/bootstrap.c
@@ -37,6 +37,7 @@
#include <mach/message.h>
#include <machine/locore.h>
#include <machine/vm_param.h>
+#include <machine/pcb.h>
#include <ipc/ipc_port.h>
#include <ipc/mach_port.h>
#include <kern/debug.h>
@@ -81,8 +82,8 @@ static mach_port_t boot_host_port; /* local name */
extern char *kernel_cmdline;
-static void user_bootstrap(); /* forward */
-static void user_bootstrap_compat(); /* forward */
+static void user_bootstrap(void); /* forward */
+static void user_bootstrap_compat(void); /* forward */
static void bootstrap_exec_compat(void *exec_data); /* forward */
static void get_compat_strings(char *flags_str, char *root_str); /* forward */
@@ -106,7 +107,21 @@ task_insert_send_right(
return name;
}
-void bootstrap_create()
+static void
+free_bootstrap_pages(phys_addr_t start, phys_addr_t end)
+{
+ struct vm_page *page;
+
+ while (start < end)
+ {
+ page = vm_page_lookup_pa(start);
+ assert(page != NULL);
+ vm_page_manage(page);
+ start += PAGE_SIZE;
+ }
+}
+
+void bootstrap_create(void)
{
int compat;
int n = 0;
@@ -149,18 +164,18 @@ void bootstrap_create()
}
else
{
- int i, losers, maxlen;
+ int i, losers;
/* Initialize boot script variables. We leak these send rights. */
losers = boot_script_set_variable
("host-port", VAL_PORT,
- (long)ipc_port_make_send(realhost.host_priv_self));
+ (long) realhost.host_priv_self);
if (losers)
panic ("cannot set boot-script variable host-port: %s",
boot_script_error_string (losers));
losers = boot_script_set_variable
("device-port", VAL_PORT,
- (long) ipc_port_make_send(master_device_port));
+ (long) master_device_port);
if (losers)
panic ("cannot set boot-script variable device-port: %s",
boot_script_error_string (losers));
@@ -240,15 +255,11 @@ void bootstrap_create()
}
#endif
- maxlen = 0;
for (i = 0; i < boot_info.mods_count; ++i)
{
int err;
char *line = (char*)phystokv(bmods[i].string);
- int len = strlen (line) + 1;
- if (len > maxlen)
- maxlen = len;
- printf ("\rmodule %d: %*s", i, -maxlen, line);
+ printf ("module %d: %s\n", i, line);
err = boot_script_parse_line (&bmods[i], line);
if (err)
{
@@ -256,7 +267,7 @@ void bootstrap_create()
++losers;
}
}
- printf ("\r%d multiboot modules %*s", i, -maxlen, "");
+ printf ("%d multiboot modules\n", i);
if (losers)
panic ("%d of %d boot script commands could not be parsed",
losers, boot_info.mods_count);
@@ -268,7 +279,7 @@ void bootstrap_create()
/* XXX we could free the memory used
by the boot loader's descriptors and such. */
for (n = 0; n < boot_info.mods_count; n++)
- vm_page_create(bmods[n].mod_start, bmods[n].mod_end);
+ free_bootstrap_pages(bmods[n].mod_start, bmods[n].mod_end);
}
static void
@@ -318,7 +329,7 @@ itoa(
vm_size_t num)
{
char buf[sizeof(vm_size_t)*2+3];
- register char *np;
+ char *np;
np = buf + sizeof(buf);
*--np = 0;
@@ -338,7 +349,7 @@ itoa(
*/
static void get_compat_strings(char *flags_str, char *root_str)
{
- register char *ip, *cp;
+ char *ip, *cp;
strcpy (root_str, "UNKNOWN");
@@ -519,16 +530,12 @@ static void copy_bootstrap(void *e, exec_info_t *boot_exec_info)
/*
* Allocate the stack, and build the argument list.
*/
-extern vm_offset_t user_stack_low();
-extern vm_offset_t set_user_regs();
-
static void
build_args_and_stack(struct exec_info *boot_exec_info,
char **argv, char **envp)
{
vm_offset_t stack_base;
vm_size_t stack_size;
- register
char * arg_ptr;
int arg_count, envc;
int arg_len;
@@ -590,7 +597,7 @@ build_args_and_stack(struct exec_info *boot_exec_info,
/*
* first the argument count
*/
- (void) copyout((char *)&arg_count,
+ (void) copyout(&arg_count,
arg_pos,
sizeof(integer_t));
arg_pos += sizeof(integer_t);
@@ -603,7 +610,7 @@ build_args_and_stack(struct exec_info *boot_exec_info,
arg_item_len = strlen(arg_ptr) + 1; /* include trailing 0 */
/* set string pointer */
- (void) copyout((char *)&string_pos,
+ (void) copyout(&string_pos,
arg_pos,
sizeof (char *));
arg_pos += sizeof(char *);
@@ -616,7 +623,7 @@ build_args_and_stack(struct exec_info *boot_exec_info,
/*
* Null terminator for argv.
*/
- (void) copyout((char *)&zero, arg_pos, sizeof(char *));
+ (void) copyout(&zero, arg_pos, sizeof(char *));
arg_pos += sizeof(char *);
/*
@@ -627,7 +634,7 @@ build_args_and_stack(struct exec_info *boot_exec_info,
arg_item_len = strlen(arg_ptr) + 1; /* include trailing 0 */
/* set string pointer */
- (void) copyout((char *)&string_pos,
+ (void) copyout(&string_pos,
arg_pos,
sizeof (char *));
arg_pos += sizeof(char *);
@@ -640,12 +647,12 @@ build_args_and_stack(struct exec_info *boot_exec_info,
/*
* Null terminator for envp.
*/
- (void) copyout((char *)&zero, arg_pos, sizeof(char *));
+ (void) copyout(&zero, arg_pos, sizeof(char *));
}
static void
-user_bootstrap_compat()
+user_bootstrap_compat(void)
{
exec_info_t boot_exec_info;
@@ -726,13 +733,14 @@ boot_script_exec_cmd (void *hook, task_t task, char *path, int argc,
thread_t thread;
struct user_bootstrap_info info = { mod, argv, 0, };
simple_lock_init (&info.lock);
- simple_lock (&info.lock);
err = thread_create ((task_t)task, &thread);
assert(err == 0);
+ simple_lock (&info.lock);
thread->saved.other = &info;
thread_start (thread, user_bootstrap);
- thread_resume (thread);
+ err = thread_resume (thread);
+ assert(err == 0);
/* We need to synchronize with the new thread and block this
main thread until it has finished referring to our local state. */
@@ -741,13 +749,15 @@ boot_script_exec_cmd (void *hook, task_t task, char *path, int argc,
thread_sleep ((event_t) &info, simple_lock_addr(info.lock), FALSE);
simple_lock (&info.lock);
}
+ simple_unlock (&info.lock);
+ thread_deallocate (thread);
printf ("\n");
}
return 0;
}
-static void user_bootstrap()
+static void user_bootstrap(void)
{
struct user_bootstrap_info *info = current_thread()->saved.other;
exec_info_t boot_exec_info;
@@ -775,6 +785,7 @@ static void user_bootstrap()
simple_lock (&info->lock);
assert (!info->done);
info->done = 1;
+ simple_unlock (&info->lock);
thread_wakeup ((event_t) info);
/*
@@ -807,6 +818,7 @@ boot_script_task_create (struct cmd *cmd)
printf("boot_script_task_create failed with %x\n", rc);
return BOOT_SCRIPT_MACH_ERROR;
}
+ task_set_name(cmd->task, cmd->path);
return 0;
}
@@ -826,10 +838,18 @@ boot_script_task_resume (struct cmd *cmd)
int
boot_script_prompt_task_resume (struct cmd *cmd)
{
+#if ! MACH_KDB
char xx[5];
+#endif
- printf ("Hit return to resume %s...", cmd->path);
+ printf ("Pausing for %s...\n", cmd->path);
+
+#if ! MACH_KDB
+ printf ("Hit <return> to resume bootstrap.");
safe_gets (xx, sizeof xx);
+#else
+ SoftDebugger("Hit `c<return>' to resume bootstrap.");
+#endif
return boot_script_task_resume (cmd);
}
@@ -839,12 +859,14 @@ boot_script_free_task (task_t task, int aborting)
{
if (aborting)
task_terminate (task);
+ task_deallocate (task);
}
int
boot_script_insert_right (struct cmd *cmd, mach_port_t port, mach_port_t *name)
{
- *name = task_insert_send_right (cmd->task, (ipc_port_t)port);
+ *name = task_insert_send_right (cmd->task,
+ ipc_port_make_send((ipc_port_t) port));
return 0;
}
diff --git a/kern/bootstrap.h b/kern/bootstrap.h
new file mode 100644
index 00000000..b8ed8d9f
--- /dev/null
+++ b/kern/bootstrap.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _KERN_BOOTSTRAP_H_
+#define _KERN_BOOTSTRAP_H_
+
+extern void bootstrap_create(void);
+
+#endif /* _KERN_BOOTSTRAP_H_ */
diff --git a/kern/counters.c b/kern/counters.c
index a9d450e1..0a0665bf 100644
--- a/kern/counters.c
+++ b/kern/counters.c
@@ -32,13 +32,12 @@
* This makes them easier to examine with ddb.
*/
+#if MACH_COUNTERS
mach_counter_t c_thread_invoke_hits = 0;
mach_counter_t c_thread_invoke_misses = 0;
mach_counter_t c_thread_invoke_csw = 0;
mach_counter_t c_thread_handoff_hits = 0;
mach_counter_t c_thread_handoff_misses = 0;
-
-#if MACH_COUNTERS
mach_counter_t c_threads_current = 0;
mach_counter_t c_threads_max = 0;
mach_counter_t c_threads_min = 0;
@@ -47,6 +46,9 @@ mach_counter_t c_stacks_current = 0;
mach_counter_t c_stacks_max = 0;
mach_counter_t c_stacks_min = 0;
mach_counter_t c_stacks_total = 0;
+mach_counter_t c_stack_alloc_hits = 0;
+mach_counter_t c_stack_alloc_misses = 0;
+mach_counter_t c_stack_alloc_max = 0;
mach_counter_t c_clock_ticks = 0;
mach_counter_t c_ipc_mqueue_send_block = 0;
mach_counter_t c_ipc_mqueue_receive_block_user = 0;
diff --git a/kern/counters.h b/kern/counters.h
index 474c6a29..aa1e739b 100644
--- a/kern/counters.h
+++ b/kern/counters.h
@@ -55,13 +55,12 @@
typedef unsigned int mach_counter_t;
+#if MACH_COUNTERS
extern mach_counter_t c_thread_invoke_hits;
extern mach_counter_t c_thread_invoke_misses;
extern mach_counter_t c_thread_invoke_csw;
extern mach_counter_t c_thread_handoff_hits;
extern mach_counter_t c_thread_handoff_misses;
-
-#if MACH_COUNTERS
extern mach_counter_t c_threads_current;
extern mach_counter_t c_threads_max;
extern mach_counter_t c_threads_min;
@@ -70,6 +69,9 @@ extern mach_counter_t c_stacks_current;
extern mach_counter_t c_stacks_max;
extern mach_counter_t c_stacks_min;
extern mach_counter_t c_stacks_total;
+extern mach_counter_t c_stack_alloc_hits;
+extern mach_counter_t c_stack_alloc_misses;
+extern mach_counter_t c_stack_alloc_max;
extern mach_counter_t c_clock_ticks;
extern mach_counter_t c_ipc_mqueue_send_block;
extern mach_counter_t c_ipc_mqueue_receive_block_user;
diff --git a/kern/cpu_number.h b/kern/cpu_number.h
index 44bbd641..650f4042 100644
--- a/kern/cpu_number.h
+++ b/kern/cpu_number.h
@@ -37,5 +37,7 @@ int master_cpu; /* 'master' processor - keeps time */
/* cpu number is always 0 on a single processor system */
#define cpu_number() (0)
+#define CPU_L1_SIZE (1 << CPU_L1_SHIFT)
+
#endif /* NCPUS == 1 */
#endif /* _KERN_CPU_NUMBER_H_ */
diff --git a/kern/debug.c b/kern/debug.c
index 7f6e5557..fd392d21 100644
--- a/kern/debug.c
+++ b/kern/debug.c
@@ -38,11 +38,7 @@
#include <machine/loose_ends.h>
#include <machine/model_dep.h>
-extern void cnputc();
-
-#if MACH_KDB
-extern int db_breakpoints_inserted;
-#endif
+#include <device/cons.h>
#if NCPUS>1
simple_lock_data_t Assert_print_lock; /* uninited, we take our chances */
@@ -55,7 +51,7 @@ do_cnputc(char c, vm_offset_t offset)
}
void
-Assert(char *exp, char *file, int line)
+Assert(const char *exp, const char *file, int line)
{
#if NCPUS > 1
simple_lock(&Assert_print_lock);
@@ -67,14 +63,11 @@ Assert(char *exp, char *file, int line)
exp, file, line);
#endif
-#if MACH_KDB
- if (db_breakpoints_inserted)
-#endif
Debugger("assertion failure");
}
void SoftDebugger(message)
- char * message;
+ const char *message;
{
printf("Debugger invoked: %s\n", message);
@@ -106,7 +99,7 @@ void SoftDebugger(message)
}
void Debugger(message)
- char * message;
+ const char *message;
{
#if !MACH_KDB
panic("Debugger invoked, but there isn't one!");
@@ -199,14 +192,12 @@ log(int level, const char *fmt, ...)
{
va_list listp;
-#ifdef lint
- level++;
-#endif
va_start(listp, fmt);
_doprnt(fmt, listp, do_cnputc, 0, 0);
va_end(listp);
}
+/* GCC references this for stack protection. */
unsigned char __stack_chk_guard [ sizeof (vm_offset_t) ] =
{
[ sizeof (vm_offset_t) - 3 ] = '\r',
diff --git a/kern/debug.h b/kern/debug.h
index e429bdd1..6c8977b8 100644
--- a/kern/debug.h
+++ b/kern/debug.h
@@ -62,7 +62,7 @@ extern void log (int level, const char *fmt, ...);
extern void panic_init(void);
extern void panic (const char *s, ...) __attribute__ ((noreturn));
-extern void SoftDebugger (char *message);
-extern void Debugger (char *message) __attribute__ ((noreturn));
+extern void SoftDebugger (const char *message);
+extern void Debugger (const char *message) __attribute__ ((noreturn));
#endif /* _mach_debug__debug_ */
diff --git a/kern/elf-load.c b/kern/elf-load.c
index 1d103d3c..441276ef 100644
--- a/kern/elf-load.c
+++ b/kern/elf-load.c
@@ -80,6 +80,8 @@ int exec_load(exec_read_func_t *read, exec_read_exec_func_t *read_exec,
result = (*read_exec)(handle,
ph->p_offset, ph->p_filesz,
ph->p_vaddr, ph->p_memsz, type);
+ if (result)
+ return result;
}
}
diff --git a/kern/eventcount.c b/kern/eventcount.c
index 6fcebff5..a9d7bd41 100644
--- a/kern/eventcount.c
+++ b/kern/eventcount.c
@@ -53,13 +53,6 @@
#include <kern/eventcount.h>
-
-#if NCPUS <= 1
-void simpler_thread_setrun(
- thread_t th,
- boolean_t may_preempt); /* forward */
-#endif
-
#define MAX_EVCS 10 /* xxx for now */
evc_t all_eventcounters[MAX_EVCS];
@@ -105,7 +98,7 @@ evc_destroy(evc_t ev)
* Thread termination.
* HORRIBLE. This stuff needs to be fixed.
*/
-void evc_notify_abort(thread_t thread)
+void evc_notify_abort(const thread_t thread)
{
int i;
evc_t ev;
@@ -130,7 +123,7 @@ void evc_notify_abort(thread_t thread)
* Just so that we return success, and give
* up the stack while blocked
*/
-static void
+static void __attribute__((noreturn))
evc_continue(void)
{
thread_syscall_return(KERN_SUCCESS);
@@ -235,8 +228,8 @@ kern_return_t evc_wait_clear(natural_t ev_id)
void
evc_signal(evc_t ev)
{
- register volatile thread_t thread;
- register int state;
+ volatile thread_t thread;
+ int state;
spl_t s;
if (ev->sanity != ev)
return;
@@ -325,8 +318,8 @@ simpler_thread_setrun(
thread_t th,
boolean_t may_preempt)
{
- register struct run_queue *rq;
- register int whichq;
+ struct run_queue *rq;
+ int whichq;
/*
* XXX should replace queue with a boolean in this case.
@@ -347,7 +340,7 @@ simpler_thread_setrun(
whichq = (th)->sched_pri;
simple_lock(&(rq)->lock); /* lock the run queue */
- enqueue_head(&(rq)->runq[whichq], (queue_entry_t) (th));
+ enqueue_head(&(rq)->runq[whichq], &((th)->links));
if (whichq < (rq)->low || (rq)->count == 0)
(rq)->low = whichq; /* minimize */
diff --git a/kern/eventcount.h b/kern/eventcount.h
index 6872a347..7cc82207 100644
--- a/kern/eventcount.h
+++ b/kern/eventcount.h
@@ -53,7 +53,12 @@ extern void evc_init(evc_t ev),
/* kernel and user visible */
extern kern_return_t evc_wait(natural_t ev_id);
+extern kern_return_t evc_wait_clear(natural_t ev_id);
-extern void evc_notify_abort (thread_t thread);
+#if NCPUS <= 1
+void simpler_thread_setrun(
+ thread_t th,
+ boolean_t may_preempt);
+#endif
#endif /* _KERN_EVENTCOUNT_H_ */
diff --git a/kern/exc.defs b/kern/exc.defs
new file mode 100644
index 00000000..e614fff8
--- /dev/null
+++ b/kern/exc.defs
@@ -0,0 +1,22 @@
+/*
+ * Copyright (c) 2016 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+/* We use custom functions to send exceptions. These functions can
+ be found in `exception.c'. We use this file merely to produce the
+ list of message ids. */
+
+#include <mach/exc.defs>
diff --git a/kern/exception.c b/kern/exception.c
index 453a0758..63a63d66 100644
--- a/kern/exception.c
+++ b/kern/exception.c
@@ -47,23 +47,13 @@
#include <kern/processor.h>
#include <kern/sched.h>
#include <kern/sched_prim.h>
+#include <kern/exception.h>
+#include <kern/macros.h>
#include <mach/machine/vm_types.h>
-
-
-extern void exception() __attribute__ ((noreturn));
-extern void exception_try_task() __attribute__ ((noreturn));
-extern void exception_no_server() __attribute__ ((noreturn));
-
-extern void exception_raise() __attribute__ ((noreturn));
-extern kern_return_t exception_parse_reply();
-extern void exception_raise_continue() __attribute__ ((noreturn));
-extern void exception_raise_continue_slow() __attribute__ ((noreturn));
-extern void exception_raise_continue_fast() __attribute__ ((noreturn));
-
#if MACH_KDB
-extern void thread_kdb_return();
-extern void db_printf();
+#include <machine/trap.h>
+#include <ddb/db_output.h>
boolean_t debug_user_with_kdb = FALSE;
#endif /* MACH_KDB */
@@ -93,11 +83,13 @@ boolean_t debug_user_with_kdb = FALSE;
*/
void
-exception(_exception, code, subcode)
- integer_t _exception, code, subcode;
+exception(
+ integer_t _exception,
+ integer_t code,
+ integer_t subcode)
{
- register ipc_thread_t self = current_thread();
- register ipc_port_t exc_port;
+ ipc_thread_t self = current_thread();
+ ipc_port_t exc_port;
if (_exception == KERN_SUCCESS)
panic("exception");
@@ -163,12 +155,14 @@ exception(_exception, code, subcode)
*/
void
-exception_try_task(_exception, code, subcode)
- integer_t _exception, code, subcode;
+exception_try_task(
+ integer_t _exception,
+ integer_t code,
+ integer_t subcode)
{
ipc_thread_t self = current_thread();
- register task_t task = self->task;
- register ipc_port_t exc_port;
+ task_t task = self->task;
+ ipc_port_t exc_port;
/*
* Optimized version of retrieve_task_exception.
@@ -228,16 +222,16 @@ exception_try_task(_exception, code, subcode)
*/
void
-exception_no_server()
+exception_no_server(void)
{
- register ipc_thread_t self = current_thread();
+ ipc_thread_t self = current_thread();
/*
* If this thread is being terminated, cooperate.
*/
while (thread_should_halt(self))
- thread_halt_self();
+ thread_halt_self(thread_exception_return);
#if 0
@@ -263,7 +257,7 @@ exception_no_server()
*/
(void) task_terminate(self->task);
- thread_halt_self();
+ thread_halt_self(thread_exception_return);
panic("terminating the task didn't kill us");
/*NOTREACHED*/
}
@@ -330,12 +324,13 @@ mach_msg_type_t exc_code_proto = {
int exception_raise_misses = 0;
void
-exception_raise(dest_port, thread_port, task_port,
- _exception, code, subcode)
- ipc_port_t dest_port;
- ipc_port_t thread_port;
- ipc_port_t task_port;
- integer_t _exception, code, subcode;
+exception_raise(
+ ipc_port_t dest_port,
+ ipc_port_t thread_port,
+ ipc_port_t task_port,
+ integer_t _exception,
+ integer_t code,
+ integer_t subcode)
{
ipc_thread_t self = current_thread();
ipc_thread_t receiver;
@@ -428,7 +423,7 @@ exception_raise(dest_port, thread_port, task_port,
*/
{
- register ipc_pset_t dest_pset;
+ ipc_pset_t dest_pset;
dest_pset = dest_port->ip_pset;
if (dest_pset == IPS_NULL)
@@ -490,7 +485,7 @@ exception_raise(dest_port, thread_port, task_port,
* Release the receiver's reference for his object.
*/
{
- register ipc_object_t object = receiver->ith_object;
+ ipc_object_t object = receiver->ith_object;
io_lock(object);
io_release(object);
@@ -498,7 +493,7 @@ exception_raise(dest_port, thread_port, task_port,
}
{
- register struct mach_exception *exc =
+ struct mach_exception *exc =
(struct mach_exception *) &kmsg->ikm_header;
ipc_space_t space = receiver->task->itk_space;
@@ -609,30 +604,18 @@ exception_raise(dest_port, thread_port, task_port,
ip_unlock(reply_port);
{
- register ipc_entry_t table;
- register ipc_entry_t entry;
- register mach_port_index_t index;
-
- /* optimized ipc_entry_get */
-
- table = space->is_table;
- index = table->ie_next;
+ kern_return_t kr;
+ ipc_entry_t entry;
- if (index == 0)
+ kr = ipc_entry_get (space, &exc->Head.msgh_remote_port, &entry);
+ if (kr)
goto abort_copyout;
-
- entry = &table[index];
- table->ie_next = entry->ie_next;
- entry->ie_request = 0;
-
{
- register mach_port_gen_t gen;
+ mach_port_gen_t gen;
assert((entry->ie_bits &~ IE_BITS_GEN_MASK) == 0);
gen = entry->ie_bits + IE_BITS_GEN_ONE;
- exc->Head.msgh_remote_port = MACH_PORT_MAKE(index, gen);
-
/* optimized ipc_right_copyout */
entry->ie_bits = gen | (MACH_PORT_TYPE_SEND_ONCE | 1);
@@ -710,7 +693,7 @@ exception_raise(dest_port, thread_port, task_port,
#endif
slow_exception_raise: {
- register struct mach_exception *exc =
+ struct mach_exception *exc =
(struct mach_exception *) &kmsg->ikm_header;
ipc_kmsg_t reply_kmsg;
mach_port_seqno_t reply_seqno;
@@ -772,6 +755,12 @@ exception_raise(dest_port, thread_port, task_port,
}
}
+/* Macro used by MIG to cleanly check the type. */
+#define BAD_TYPECHECK(type, check) unlikely (({\
+ union { mach_msg_type_t t; unsigned32_t w; } _t, _c;\
+ _t.t = *(type); _c.t = *(check);_t.w != _c.w; }))
+
+/* Type descriptor for the return code. */
mach_msg_type_t exc_RetCode_proto = {
/* msgt_name = */ MACH_MSG_TYPE_INTEGER_32,
/* msgt_size = */ 32,
@@ -794,10 +783,9 @@ mach_msg_type_t exc_RetCode_proto = {
*/
kern_return_t
-exception_parse_reply(kmsg)
- ipc_kmsg_t kmsg;
+exception_parse_reply(ipc_kmsg_t kmsg)
{
- register mig_reply_header_t *msg =
+ mig_reply_header_t *msg =
(mig_reply_header_t *) &kmsg->ikm_header;
kern_return_t kr;
@@ -805,7 +793,7 @@ exception_parse_reply(kmsg)
MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND_ONCE, 0)) ||
(msg->Head.msgh_size != sizeof *msg) ||
(msg->Head.msgh_id != MACH_EXCEPTION_REPLY_ID) ||
- (* (int *) &msg->RetCodeType != * (int *) &exc_RetCode_proto)) {
+ (BAD_TYPECHECK(&msg->RetCodeType, &exc_RetCode_proto))) {
/*
* Bozo user sent us a misformatted reply.
*/
@@ -839,7 +827,7 @@ exception_parse_reply(kmsg)
*/
void
-exception_raise_continue()
+exception_raise_continue(void)
{
ipc_thread_t self = current_thread();
ipc_port_t reply_port = self->ith_port;
@@ -860,6 +848,26 @@ exception_raise_continue()
}
/*
+ * Routine: thread_release_and_exception_return
+ * Purpose:
+ * Continue after thread was halted.
+ * Conditions:
+ * Nothing locked. We are running on a new kernel stack and
+ * control goes back to thread_exception_return.
+ * Returns:
+ * Doesn't return.
+ */
+static void
+thread_release_and_exception_return(void)
+{
+ ipc_thread_t self = current_thread();
+ /* reply port must be released */
+ ipc_port_release(self->ith_port);
+ thread_exception_return();
+ /*NOTREACHED*/
+}
+
+/*
* Routine: exception_raise_continue_slow
* Purpose:
* Continue after finishing an ipc_mqueue_receive
@@ -871,10 +879,10 @@ exception_raise_continue()
*/
void
-exception_raise_continue_slow(mr, kmsg, seqno)
- mach_msg_return_t mr;
- ipc_kmsg_t kmsg;
- mach_port_seqno_t seqno;
+exception_raise_continue_slow(
+ mach_msg_return_t mr,
+ ipc_kmsg_t kmsg,
+ mach_port_seqno_t seqno)
{
ipc_thread_t self = current_thread();
ipc_port_t reply_port = self->ith_port;
@@ -888,10 +896,14 @@ exception_raise_continue_slow(mr, kmsg, seqno)
*/
while (thread_should_halt(self)) {
- /* don't terminate while holding a reference */
+ /* if thread is about to terminate, release the port */
if (self->ast & AST_TERMINATE)
ipc_port_release(reply_port);
- thread_halt_self();
+ /*
+ * Use the continuation to release the port in
+ * case the thread is about to halt.
+ */
+ thread_halt_self(thread_release_and_exception_return);
}
ip_lock(reply_port);
@@ -954,9 +966,9 @@ exception_raise_continue_slow(mr, kmsg, seqno)
*/
void
-exception_raise_continue_fast(reply_port, kmsg)
- ipc_port_t reply_port;
- ipc_kmsg_t kmsg;
+exception_raise_continue_fast(
+ ipc_port_t reply_port,
+ ipc_kmsg_t kmsg)
{
ipc_thread_t self = current_thread();
kern_return_t kr;
diff --git a/kern/exception.h b/kern/exception.h
new file mode 100644
index 00000000..55902dd1
--- /dev/null
+++ b/kern/exception.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _KERN_EXCEPTION_H_
+#define _KERN_EXCEPTION_H_
+
+#include <ipc/ipc_types.h>
+#include <ipc/ipc_kmsg.h>
+
+extern void
+exception(
+ integer_t _exception,
+ integer_t code,
+ integer_t subcode) __attribute__ ((noreturn));
+
+extern void
+exception_try_task(
+ integer_t _exception,
+ integer_t code,
+ integer_t subcode) __attribute__ ((noreturn));
+
+extern void
+exception_no_server(void) __attribute__ ((noreturn));
+
+extern void
+exception_raise(
+ ipc_port_t dest_port,
+ ipc_port_t thread_port,
+ ipc_port_t task_port,
+ integer_t _exception,
+ integer_t code,
+ integer_t subcode) __attribute__ ((noreturn));
+
+extern kern_return_t
+exception_parse_reply(ipc_kmsg_t kmsg);
+
+extern void
+exception_raise_continue(void) __attribute__ ((noreturn));
+
+extern void
+exception_raise_continue_slow(
+ mach_msg_return_t mr,
+ ipc_kmsg_t kmsg,
+ mach_port_seqno_t seqno) __attribute__ ((noreturn));
+
+extern void
+exception_raise_continue_fast(
+ ipc_port_t reply_port,
+ ipc_kmsg_t kmsg) __attribute__ ((noreturn));
+
+#endif /* _KERN_EXCEPTION_H_ */
diff --git a/kern/host.c b/kern/host.c
index 57a40b43..2855cd2d 100644
--- a/kern/host.c
+++ b/kern/host.c
@@ -47,12 +47,12 @@
host_data_t realhost;
kern_return_t host_processors(
- host_t host,
+ const host_t host,
processor_array_t *processor_list,
natural_t *countp)
{
- register int i;
- register processor_t *tp;
+ int i;
+ processor_t *tp;
vm_offset_t addr;
unsigned int count;
@@ -95,12 +95,12 @@ kern_return_t host_processors(
}
kern_return_t host_info(
- host_t host,
+ const host_t host,
int flavor,
host_info_t info,
natural_t *count)
{
- register integer_t i, *slot_ptr;
+ integer_t i, *slot_ptr;
if (host == HOST_NULL)
return KERN_INVALID_ARGUMENT;
@@ -109,7 +109,7 @@ kern_return_t host_info(
case HOST_BASIC_INFO:
{
- register host_basic_info_t basic_info;
+ host_basic_info_t basic_info;
/*
* Basic information about this host.
@@ -152,7 +152,7 @@ kern_return_t host_info(
case HOST_SCHED_INFO:
{
- register host_sched_info_t sched_info;
+ host_sched_info_t sched_info;
extern int min_quantum;
/* minimum quantum, in microseconds */
@@ -174,7 +174,7 @@ kern_return_t host_info(
case HOST_LOAD_INFO:
{
- register host_load_info_t load_info;
+ host_load_info_t load_info;
extern long avenrun[3], mach_factor[3];
if (*count < HOST_LOAD_INFO_COUNT)
@@ -204,7 +204,7 @@ kern_return_t host_info(
*/
kern_return_t host_kernel_version(
- host_t host,
+ const host_t host,
kernel_version_t out_version)
{
extern char version[];
@@ -225,7 +225,7 @@ kern_return_t host_kernel_version(
#if MACH_HOST
kern_return_t
host_processor_sets(
- host_t host,
+ const host_t host,
processor_set_name_array_t *pset_list,
natural_t *count)
{
@@ -302,7 +302,7 @@ host_processor_sets(
return KERN_RESOURCE_SHORTAGE;
}
- memcpy((char *) newaddr, (char *) addr, size_needed);
+ memcpy((void *) newaddr, (void *) addr, size_needed);
kfree(addr, size);
psets = (processor_set_t *) newaddr;
}
@@ -324,7 +324,7 @@ host_processor_sets(
*/
kern_return_t
host_processor_sets(
- host_t host,
+ const host_t host,
processor_set_name_array_t *pset_list,
natural_t *count)
{
@@ -362,7 +362,7 @@ host_processor_sets(
*/
kern_return_t
host_processor_set_priv(
- host_t host,
+ const host_t host,
processor_set_t pset_name,
processor_set_t *pset)
{
diff --git a/kern/ipc_host.c b/kern/ipc_host.c
index cd1c11ab..a02eb6f6 100644
--- a/kern/ipc_host.c
+++ b/kern/ipc_host.c
@@ -205,7 +205,7 @@ ipc_pset_terminate(
*/
kern_return_t
processor_set_default(
- host_t host,
+ const host_t host,
processor_set_t *pset)
{
if (host == HOST_NULL)
diff --git a/kern/ipc_kobject.c b/kern/ipc_kobject.c
index 37d4eb99..709ec9ec 100644
--- a/kern/ipc_kobject.c
+++ b/kern/ipc_kobject.c
@@ -49,8 +49,21 @@
#include <vm/memory_object_proxy.h>
#include <device/ds_routines.h>
+#include <kern/mach.server.h>
+#include <ipc/mach_port.server.h>
+#include <kern/mach_host.server.h>
+#include <device/device.server.h>
+#include <device/device_pager.server.h>
+#include <kern/mach4.server.h>
+#include <kern/gnumach.server.h>
+
+#if MACH_DEBUG
+#include <kern/mach_debug.server.h>
+#endif
+
#if MACH_MACHINE_ROUTINES
#include <machine/machine_routines.h>
+#include MACHINE_SERVER_HEADER
#endif
@@ -146,21 +159,6 @@ ipc_kobject_server(request)
* to perform the kernel function
*/
{
- extern mig_routine_t mach_server_routine(),
- mach_port_server_routine(),
- mach_host_server_routine(),
- device_server_routine(),
- device_pager_server_routine(),
- mach4_server_routine(),
- gnumach_server_routine();
-#if MACH_DEBUG
- extern mig_routine_t mach_debug_server_routine();
-#endif
-
-#if MACH_MACHINE_ROUTINES
- extern mig_routine_t MACHINE_SERVER_ROUTINE();
-#endif
-
check_simple_locks();
if ((routine = mach_server_routine(&request->ikm_header)) != 0
|| (routine = mach_port_server_routine(&request->ikm_header)) != 0
@@ -246,7 +244,7 @@ ipc_kobject_server(request)
} else {
/*
* The message contents of the request are intact.
- * Destroy everthing except the reply port right,
+ * Destroy everything except the reply port right,
* which is needed in the reply message.
*/
diff --git a/kern/ipc_kobject.h b/kern/ipc_kobject.h
index cb795741..606a66a9 100644
--- a/kern/ipc_kobject.h
+++ b/kern/ipc_kobject.h
@@ -36,13 +36,12 @@
* Declarations for letting a port represent a kernel object.
*/
-#include <ipc/ipc_kmsg.h>
-#include <ipc/ipc_types.h>
-
#ifndef _KERN_IPC_KOBJECT_H_
#define _KERN_IPC_KOBJECT_H_
#include <mach/machine/vm_types.h>
+#include <ipc/ipc_types.h>
+#include <ipc/ipc_kmsg.h>
typedef vm_offset_t ipc_kobject_t;
diff --git a/kern/ipc_mig.c b/kern/ipc_mig.c
index e1532ac8..22dac420 100644
--- a/kern/ipc_mig.c
+++ b/kern/ipc_mig.c
@@ -37,6 +37,7 @@
#include <kern/task.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_tt.h>
+#include <kern/ipc_mig.h>
#include <vm/vm_map.h>
#include <vm/vm_user.h>
#include <ipc/port.h>
@@ -91,7 +92,7 @@ mach_msg_send_from_kernel(
mach_msg_return_t
mach_msg_rpc_from_kernel(msg, send_size, reply_size)
- mach_msg_header_t *msg;
+ const mach_msg_header_t *msg;
mach_msg_size_t send_size;
mach_msg_size_t reply_size;
{
@@ -109,8 +110,7 @@ mach_msg_rpc_from_kernel(msg, send_size, reply_size)
*/
void
-mach_msg_abort_rpc(thread)
- ipc_thread_t thread;
+mach_msg_abort_rpc(ipc_thread_t thread)
{
ipc_port_t reply = IP_NULL;
@@ -140,14 +140,14 @@ mach_msg_abort_rpc(thread)
*/
mach_msg_return_t
-mach_msg(msg, option, send_size, rcv_size, rcv_name, time_out, notify)
- mach_msg_header_t *msg;
- mach_msg_option_t option;
- mach_msg_size_t send_size;
- mach_msg_size_t rcv_size;
- mach_port_t rcv_name;
- mach_msg_timeout_t time_out;
- mach_port_t notify;
+mach_msg(
+ mach_msg_header_t *msg,
+ mach_msg_option_t option,
+ mach_msg_size_t send_size,
+ mach_msg_size_t rcv_size,
+ mach_port_t rcv_name,
+ mach_msg_timeout_t time_out,
+ mach_port_t notify)
{
ipc_space_t space = current_space();
vm_map_t map = current_map();
@@ -271,10 +271,10 @@ mig_put_reply_port(
/*
* mig_strncpy.c - by Joshua Block
*
- * mig_strncp -- Bounded string copy. Does what the library routine strncpy
- * OUGHT to do: Copies the (null terminated) string in src into dest, a
- * buffer of length len. Assures that the copy is still null terminated
- * and doesn't overflow the buffer, truncating the copy if necessary.
+ * mig_strncpy -- Bounded string copy. Does what the library routine
+ * strncpy does: Copies the (null terminated) string in src into dest,
+ * a buffer of length len. Returns the length of the destination
+ * string excluding the terminating null.
*
* Parameters:
*
@@ -284,37 +284,44 @@ mig_put_reply_port(
*
* len - Length of destination buffer.
*/
-void mig_strncpy(dest, src, len)
-char *dest, *src;
-int len;
+vm_size_t
+mig_strncpy(dest, src, len)
+ char *dest;
+ const char *src;
+ int len;
{
- int i;
+ char *dest_ = dest;
+ int i;
- if (len <= 0)
- return;
+ if (len <= 0)
+ return 0;
- for (i=1; i<len; i++)
- if (! (*dest++ = *src++))
- return;
+ for (i = 0; i < len; i++) {
+ if (! (*dest = *src))
+ break;
+ dest++;
+ src++;
+ }
- *dest = '\0';
- return;
+ return dest - dest_;
}
#define fast_send_right_lookup(name, port, abort) \
MACRO_BEGIN \
- register ipc_space_t space = current_space(); \
- register ipc_entry_t entry; \
- register mach_port_index_t index = MACH_PORT_INDEX(name); \
+ ipc_space_t space = current_space(); \
+ ipc_entry_t entry; \
\
is_read_lock(space); \
assert(space->is_active); \
\
- if ((index >= space->is_table_size) || \
- (((entry = &space->is_table[index])->ie_bits & \
- (IE_BITS_GEN_MASK|MACH_PORT_TYPE_SEND)) != \
- (MACH_PORT_GEN(name) | MACH_PORT_TYPE_SEND))) { \
- is_read_unlock(space); \
+ entry = ipc_entry_lookup (space, name); \
+ if (entry == IE_NULL) { \
+ is_read_unlock (space); \
+ abort; \
+ } \
+ \
+ if (IE_BITS_TYPE (entry->ie_bits) != MACH_PORT_TYPE_SEND) { \
+ is_read_unlock (space); \
abort; \
} \
\
@@ -327,11 +334,10 @@ MACRO_BEGIN \
MACRO_END
device_t
-port_name_to_device(name)
- mach_port_t name;
+port_name_to_device(mach_port_t name)
{
- register ipc_port_t port;
- register device_t device;
+ ipc_port_t port;
+ device_t device;
fast_send_right_lookup(name, port, goto abort);
/* port is locked */
@@ -371,17 +377,16 @@ port_name_to_device(name)
}
thread_t
-port_name_to_thread(name)
- mach_port_t name;
+port_name_to_thread(mach_port_t name)
{
- register ipc_port_t port;
+ ipc_port_t port;
fast_send_right_lookup(name, port, goto abort);
/* port is locked */
if (ip_active(port) &&
(ip_kotype(port) == IKOT_THREAD)) {
- register thread_t thread;
+ thread_t thread;
thread = (thread_t) port->ip_kobject;
assert(thread != THREAD_NULL);
@@ -417,17 +422,16 @@ port_name_to_thread(name)
}
task_t
-port_name_to_task(name)
- mach_port_t name;
+port_name_to_task(mach_port_t name)
{
- register ipc_port_t port;
+ ipc_port_t port;
fast_send_right_lookup(name, port, goto abort);
/* port is locked */
if (ip_active(port) &&
(ip_kotype(port) == IKOT_TASK)) {
- register task_t task;
+ task_t task;
task = (task_t) port->ip_kobject;
assert(task != TASK_NULL);
@@ -468,14 +472,14 @@ vm_map_t
port_name_to_map(
mach_port_t name)
{
- register ipc_port_t port;
+ ipc_port_t port;
fast_send_right_lookup(name, port, goto abort);
/* port is locked */
if (ip_active(port) &&
(ip_kotype(port) == IKOT_TASK)) {
- register vm_map_t map;
+ vm_map_t map;
map = ((task_t) port->ip_kobject)->map;
assert(map != VM_MAP_NULL);
@@ -513,17 +517,16 @@ port_name_to_map(
}
ipc_space_t
-port_name_to_space(name)
- mach_port_t name;
+port_name_to_space(mach_port_t name)
{
- register ipc_port_t port;
+ ipc_port_t port;
fast_send_right_lookup(name, port, goto abort);
/* port is locked */
if (ip_active(port) &&
(ip_kotype(port) == IKOT_TASK)) {
- register ipc_space_t space;
+ ipc_space_t space;
space = ((task_t) port->ip_kobject)->itk_space;
assert(space != IS_NULL);
@@ -569,12 +572,11 @@ port_name_to_space(name)
* AARGH!
*/
-kern_return_t thread_get_state_KERNEL(thread_port, flavor,
- old_state, old_state_count)
- mach_port_t thread_port; /* port right for thread */
- int flavor;
- thread_state_t old_state; /* pointer to OUT array */
- natural_t *old_state_count; /* IN/OUT */
+kern_return_t thread_get_state_KERNEL(
+ mach_port_t thread_port, /* port right for thread */
+ int flavor,
+ thread_state_t old_state, /* pointer to OUT array */
+ natural_t *old_state_count) /* IN/OUT */
{
thread_t thread;
kern_return_t result;
@@ -586,12 +588,11 @@ kern_return_t thread_get_state_KERNEL(thread_port, flavor,
return result;
}
-kern_return_t thread_set_state_KERNEL(thread_port, flavor,
- new_state, new_state_count)
- mach_port_t thread_port; /* port right for thread */
- int flavor;
- thread_state_t new_state;
- natural_t new_state_count;
+kern_return_t thread_set_state_KERNEL(
+ mach_port_t thread_port, /* port right for thread */
+ int flavor,
+ thread_state_t new_state,
+ natural_t new_state_count)
{
thread_t thread;
kern_return_t result;
@@ -613,7 +614,7 @@ kern_return_t thread_set_state_KERNEL(thread_port, flavor,
* knows to fall back on an RPC. For other return values, it won't
* retry with an RPC. The retry might get a different (incorrect) rc.
* Return values are only set (and should only be set, with copyout)
- * on successfull calls.
+ * on successful calls.
*/
kern_return_t
@@ -650,12 +651,12 @@ syscall_vm_map(
} else
port = (ipc_port_t) memory_object;
- copyin((char *)address, (char *)&addr, sizeof(vm_offset_t));
+ copyin(address, &addr, sizeof(vm_offset_t));
result = vm_map(map, &addr, size, mask, anywhere,
port, offset, copy,
cur_protection, max_protection, inheritance);
if (result == KERN_SUCCESS)
- copyout((char *)&addr, (char *)address, sizeof(vm_offset_t));
+ copyout(&addr, address, sizeof(vm_offset_t));
if (IP_VALID(port))
ipc_port_release_send(port);
vm_map_deallocate(map);
@@ -663,11 +664,11 @@ syscall_vm_map(
return result;
}
-kern_return_t syscall_vm_allocate(target_map, address, size, anywhere)
- mach_port_t target_map;
- vm_offset_t *address;
- vm_size_t size;
- boolean_t anywhere;
+kern_return_t syscall_vm_allocate(
+ mach_port_t target_map,
+ vm_offset_t *address,
+ vm_size_t size,
+ boolean_t anywhere)
{
vm_map_t map;
vm_offset_t addr;
@@ -677,19 +678,19 @@ kern_return_t syscall_vm_allocate(target_map, address, size, anywhere)
if (map == VM_MAP_NULL)
return MACH_SEND_INTERRUPTED;
- copyin((char *)address, (char *)&addr, sizeof(vm_offset_t));
+ copyin(address, &addr, sizeof(vm_offset_t));
result = vm_allocate(map, &addr, size, anywhere);
if (result == KERN_SUCCESS)
- copyout((char *)&addr, (char *)address, sizeof(vm_offset_t));
+ copyout(&addr, address, sizeof(vm_offset_t));
vm_map_deallocate(map);
return result;
}
-kern_return_t syscall_vm_deallocate(target_map, start, size)
- mach_port_t target_map;
- vm_offset_t start;
- vm_size_t size;
+kern_return_t syscall_vm_deallocate(
+ mach_port_t target_map,
+ vm_offset_t start,
+ vm_size_t size)
{
vm_map_t map;
kern_return_t result;
@@ -704,10 +705,10 @@ kern_return_t syscall_vm_deallocate(target_map, start, size)
return result;
}
-kern_return_t syscall_task_create(parent_task, inherit_memory, child_task)
- mach_port_t parent_task;
- boolean_t inherit_memory;
- mach_port_t *child_task; /* OUT */
+kern_return_t syscall_task_create(
+ mach_port_t parent_task,
+ boolean_t inherit_memory,
+ mach_port_t *child_task) /* OUT */
{
task_t t, c;
ipc_port_t port;
@@ -725,7 +726,7 @@ kern_return_t syscall_task_create(parent_task, inherit_memory, child_task)
(void) ipc_kmsg_copyout_object(current_space(),
(ipc_object_t) port,
MACH_MSG_TYPE_PORT_SEND, &name);
- copyout((char *)&name, (char *)child_task,
+ copyout(&name, child_task,
sizeof(mach_port_t));
}
task_deallocate(t);
@@ -733,8 +734,7 @@ kern_return_t syscall_task_create(parent_task, inherit_memory, child_task)
return result;
}
-kern_return_t syscall_task_terminate(task)
- mach_port_t task;
+kern_return_t syscall_task_terminate(mach_port_t task)
{
task_t t;
kern_return_t result;
@@ -749,8 +749,7 @@ kern_return_t syscall_task_terminate(task)
return result;
}
-kern_return_t syscall_task_suspend(task)
- mach_port_t task;
+kern_return_t syscall_task_suspend(mach_port_t task)
{
task_t t;
kern_return_t result;
@@ -765,10 +764,10 @@ kern_return_t syscall_task_suspend(task)
return result;
}
-kern_return_t syscall_task_set_special_port(task, which_port, port_name)
- mach_port_t task;
- int which_port;
- mach_port_t port_name;
+kern_return_t syscall_task_set_special_port(
+ mach_port_t task,
+ int which_port,
+ mach_port_t port_name)
{
task_t t;
ipc_port_t port;
@@ -798,10 +797,10 @@ kern_return_t syscall_task_set_special_port(task, which_port, port_name)
}
kern_return_t
-syscall_mach_port_allocate(task, right, namep)
- mach_port_t task;
- mach_port_right_t right;
- mach_port_t *namep;
+syscall_mach_port_allocate(
+ mach_port_t task,
+ mach_port_right_t right,
+ mach_port_t *namep)
{
ipc_space_t space;
mach_port_t name;
@@ -813,17 +812,17 @@ syscall_mach_port_allocate(task, right, namep)
kr = mach_port_allocate(space, right, &name);
if (kr == KERN_SUCCESS)
- copyout((char *)&name, (char *)namep, sizeof(mach_port_t));
+ copyout(&name, namep, sizeof(mach_port_t));
is_release(space);
return kr;
}
kern_return_t
-syscall_mach_port_allocate_name(task, right, name)
- mach_port_t task;
- mach_port_right_t right;
- mach_port_t name;
+syscall_mach_port_allocate_name(
+ mach_port_t task,
+ mach_port_right_t right,
+ mach_port_t name)
{
ipc_space_t space;
kern_return_t kr;
@@ -839,9 +838,9 @@ syscall_mach_port_allocate_name(task, right, name)
}
kern_return_t
-syscall_mach_port_deallocate(task, name)
- mach_port_t task;
- mach_port_t name;
+syscall_mach_port_deallocate(
+ mach_port_t task,
+ mach_port_t name)
{
ipc_space_t space;
kern_return_t kr;
@@ -857,11 +856,11 @@ syscall_mach_port_deallocate(task, name)
}
kern_return_t
-syscall_mach_port_insert_right(task, name, right, rightType)
- mach_port_t task;
- mach_port_t name;
- mach_port_t right;
- mach_msg_type_name_t rightType;
+syscall_mach_port_insert_right(
+ mach_port_t task,
+ mach_port_t name,
+ mach_port_t right,
+ mach_msg_type_name_t rightType)
{
ipc_space_t space;
ipc_object_t object;
@@ -896,8 +895,7 @@ syscall_mach_port_insert_right(task, name, right, rightType)
return kr;
}
-kern_return_t syscall_thread_depress_abort(thread)
- mach_port_t thread;
+kern_return_t syscall_thread_depress_abort(mach_port_t thread)
{
thread_t t;
kern_return_t result;
@@ -915,10 +913,6 @@ kern_return_t syscall_thread_depress_abort(thread)
/*
* Device traps -- these are way experimental.
*/
-
-extern io_return_t ds_device_write_trap();
-extern io_return_t ds_device_writev_trap();
-
io_return_t
syscall_device_write_request(mach_port_t device_name,
mach_port_t reply_name,
@@ -979,7 +973,7 @@ syscall_device_writev_request(mach_port_t device_name,
vm_size_t iocount)
{
device_t dev;
- ipc_port_t reply_port;
+ /*ipc_port_t reply_port;*/
io_return_t res;
/*
@@ -1000,9 +994,10 @@ syscall_device_writev_request(mach_port_t device_name,
/*
* Translate reply port.
*/
- if (reply_name == MACH_PORT_NULL)
+ /*if (reply_name == MACH_PORT_NULL)
reply_port = IP_NULL;
- else {
+ */
+ if (reply_name != MACH_PORT_NULL) {
/* Homey don't play that. */
device_deallocate(dev);
return KERN_INVALID_RIGHT;
diff --git a/kern/ipc_mig.h b/kern/ipc_mig.h
index f352bdc6..6f063eca 100644
--- a/kern/ipc_mig.h
+++ b/kern/ipc_mig.h
@@ -27,6 +27,7 @@
#define _IPC_MIG_H_
#include <mach/std_types.h>
+#include <device/device_types.h>
/*
* Routine: mach_msg_send_from_kernel
@@ -58,8 +59,84 @@ extern mach_msg_return_t mach_msg_send_from_kernel(
extern void mach_msg_abort_rpc (ipc_thread_t);
extern mach_msg_return_t mach_msg_rpc_from_kernel(
- mach_msg_header_t *msg,
+ const mach_msg_header_t *msg,
mach_msg_size_t send_size,
mach_msg_size_t reply_size);
+extern kern_return_t syscall_vm_map(
+ mach_port_t target_map,
+ vm_offset_t *address,
+ vm_size_t size,
+ vm_offset_t mask,
+ boolean_t anywhere,
+ mach_port_t memory_object,
+ vm_offset_t offset,
+ boolean_t copy,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance);
+
+extern kern_return_t syscall_vm_allocate(
+ mach_port_t target_map,
+ vm_offset_t *address,
+ vm_size_t size,
+ boolean_t anywhere);
+
+extern kern_return_t syscall_vm_deallocate(
+ mach_port_t target_map,
+ vm_offset_t start,
+ vm_size_t size);
+
+extern kern_return_t syscall_task_create(
+ mach_port_t parent_task,
+ boolean_t inherit_memory,
+ mach_port_t *child_task);
+
+extern kern_return_t syscall_task_terminate(mach_port_t task);
+
+extern kern_return_t syscall_task_suspend(mach_port_t task);
+
+extern kern_return_t syscall_task_set_special_port(
+ mach_port_t task,
+ int which_port,
+ mach_port_t port_name);
+
+extern kern_return_t syscall_mach_port_allocate(
+ mach_port_t task,
+ mach_port_right_t right,
+ mach_port_t *namep);
+
+extern kern_return_t syscall_mach_port_deallocate(
+ mach_port_t task,
+ mach_port_t name);
+
+extern kern_return_t syscall_mach_port_insert_right(
+ mach_port_t task,
+ mach_port_t name,
+ mach_port_t right,
+ mach_msg_type_name_t rightType);
+
+extern kern_return_t syscall_mach_port_allocate_name(
+ mach_port_t task,
+ mach_port_right_t right,
+ mach_port_t name);
+
+extern kern_return_t syscall_thread_depress_abort(mach_port_t thread);
+
+extern io_return_t syscall_device_write_request(
+ mach_port_t device_name,
+ mach_port_t reply_name,
+ dev_mode_t mode,
+ recnum_t recnum,
+ vm_offset_t data,
+ vm_size_t data_count);
+
+io_return_t syscall_device_writev_request(
+ mach_port_t device_name,
+ mach_port_t reply_name,
+ dev_mode_t mode,
+ recnum_t recnum,
+ io_buf_vec_t *iovec,
+ vm_size_t iocount);
+
#endif /* _IPC_MIG_H_ */
diff --git a/kern/ipc_sched.c b/kern/ipc_sched.c
index 615ad608..be82971b 100644
--- a/kern/ipc_sched.c
+++ b/kern/ipc_sched.c
@@ -182,9 +182,9 @@ thread_will_wait_with_timeout(
boolean_t
thread_handoff(
- register thread_t old,
- register continuation_t continuation,
- register thread_t new)
+ thread_t old,
+ continuation_t continuation,
+ thread_t new)
{
spl_t s;
@@ -214,7 +214,7 @@ thread_handoff(
thread_unlock(new);
(void) splx(s);
- counter_always(c_thread_handoff_misses++);
+ counter(c_thread_handoff_misses++);
return FALSE;
}
@@ -268,7 +268,7 @@ thread_handoff(
*/
old->wake_active = FALSE;
thread_unlock(old);
- thread_wakeup((event_t)&old->wake_active);
+ thread_wakeup(TH_EV_WAKE_ACTIVE(old));
goto after_old_thread;
}
} else
@@ -278,6 +278,6 @@ thread_handoff(
after_old_thread:
(void) splx(s);
- counter_always(c_thread_handoff_hits++);
+ counter(c_thread_handoff_hits++);
return TRUE;
}
diff --git a/kern/ipc_tt.c b/kern/ipc_tt.c
index 6d32e5b0..e4d657b7 100644
--- a/kern/ipc_tt.c
+++ b/kern/ipc_tt.c
@@ -72,7 +72,7 @@ ipc_task_init(
int i;
- kr = ipc_space_create(&ipc_table_entries[0], &space);
+ kr = ipc_space_create(&space);
if (kr != KERN_SUCCESS)
panic("ipc_task_init");
@@ -213,8 +213,7 @@ ipc_task_terminate(
*/
void
-ipc_thread_init(thread)
- thread_t thread;
+ipc_thread_init(thread_t thread)
{
ipc_port_t kport;
@@ -243,8 +242,7 @@ ipc_thread_init(thread)
*/
void
-ipc_thread_enable(thread)
- thread_t thread;
+ipc_thread_enable(thread_t thread)
{
ipc_port_t kport;
@@ -264,8 +262,7 @@ ipc_thread_enable(thread)
*/
void
-ipc_thread_disable(thread)
- thread_t thread;
+ipc_thread_disable(thread_t thread)
{
ipc_port_t kport;
@@ -286,8 +283,7 @@ ipc_thread_disable(thread)
*/
void
-ipc_thread_terminate(thread)
- thread_t thread;
+ipc_thread_terminate(thread_t thread)
{
ipc_port_t kport;
@@ -387,9 +383,9 @@ retrieve_thread_self(thread)
ipc_port_t
retrieve_task_self_fast(
- register task_t task)
+ task_t task)
{
- register ipc_port_t port;
+ ipc_port_t port;
assert(task == current_task());
@@ -424,10 +420,9 @@ retrieve_task_self_fast(
*/
ipc_port_t
-retrieve_thread_self_fast(thread)
- register thread_t thread;
+retrieve_thread_self_fast(thread_t thread)
{
- register ipc_port_t port;
+ ipc_port_t port;
assert(thread == current_thread());
@@ -648,9 +643,9 @@ task_get_special_port(
kern_return_t
task_set_special_port(
- task_t task,
- int which,
- ipc_port_t port)
+ task_t task,
+ int which,
+ const ipc_port_t port)
{
ipc_port_t *whichp;
ipc_port_t old;
@@ -705,10 +700,10 @@ task_set_special_port(
*/
kern_return_t
-thread_get_special_port(thread, which, portp)
- thread_t thread;
- int which;
- ipc_port_t *portp;
+thread_get_special_port(
+ thread_t thread,
+ int which,
+ ipc_port_t *portp)
{
ipc_port_t *whichp;
ipc_port_t port;
@@ -758,10 +753,10 @@ thread_get_special_port(thread, which, portp)
*/
kern_return_t
-thread_set_special_port(thread, which, port)
- thread_t thread;
- int which;
- ipc_port_t port;
+thread_set_special_port(
+ thread_t thread,
+ int which,
+ ipc_port_t port)
{
ipc_port_t *whichp;
ipc_port_t old;
@@ -890,10 +885,10 @@ mach_ports_register(
*/
kern_return_t
-mach_ports_lookup(task, portsp, portsCnt)
- task_t task;
- ipc_port_t **portsp;
- mach_msg_type_number_t *portsCnt;
+mach_ports_lookup(
+ task_t task,
+ ipc_port_t **portsp,
+ mach_msg_type_number_t *portsCnt)
{
vm_offset_t memory;
vm_size_t size;
@@ -1003,8 +998,7 @@ convert_port_to_space(
*/
vm_map_t
-convert_port_to_map(port)
- ipc_port_t port;
+convert_port_to_map(ipc_port_t port)
{
vm_map_t map = VM_MAP_NULL;
@@ -1032,8 +1026,7 @@ convert_port_to_map(port)
*/
thread_t
-convert_port_to_thread(port)
- ipc_port_t port;
+convert_port_to_thread(ipc_port_t port)
{
thread_t thread = THREAD_NULL;
@@ -1061,8 +1054,7 @@ convert_port_to_thread(port)
*/
ipc_port_t
-convert_task_to_port(task)
- task_t task;
+convert_task_to_port(task_t task)
{
ipc_port_t port;
@@ -1088,8 +1080,7 @@ convert_task_to_port(task)
*/
ipc_port_t
-convert_thread_to_port(thread)
- thread_t thread;
+convert_thread_to_port(thread_t thread)
{
ipc_port_t port;
@@ -1113,8 +1104,7 @@ convert_thread_to_port(thread)
*/
void
-space_deallocate(space)
- ipc_space_t space;
+space_deallocate(ipc_space_t space)
{
if (space != IS_NULL)
is_release(space);
diff --git a/kern/list.h b/kern/list.h
index ad782a8a..be927625 100644
--- a/kern/list.h
+++ b/kern/list.h
@@ -31,9 +31,7 @@
#include <stddef.h>
#include <sys/types.h>
-
-#define structof(ptr, type, member) \
- ((type *)((char *)ptr - offsetof(type, member)))
+#include <kern/macros.h>
/*
* Structure used as both head and node.
diff --git a/kern/lock.c b/kern/lock.c
index 44d4448e..1daf1b4d 100644
--- a/kern/lock.c
+++ b/kern/lock.c
@@ -133,12 +133,25 @@ unsigned int simple_locks_taken = 0;
struct simple_locks_info {
simple_lock_t l;
- unsigned int ra;
+ const char *expr;
+ const char *loc;
} simple_locks_info[NSLINFO];
+int do_check_simple_locks = 1;
+
void check_simple_locks(void)
{
- assert(simple_locks_taken == 0);
+ assert(! do_check_simple_locks || simple_locks_taken == 0);
+}
+
+void check_simple_locks_enable(void)
+{
+ do_check_simple_locks = 1;
+}
+
+void check_simple_locks_disable(void)
+{
+ do_check_simple_locks = 0;
}
/* Need simple lock sanity checking code if simple locks are being
@@ -150,8 +163,10 @@ void simple_lock_init(
l->lock_data = 0;
}
-void simple_lock(
- simple_lock_t l)
+void _simple_lock(
+ simple_lock_t l,
+ const char *expression,
+ const char *location)
{
struct simple_locks_info *info;
@@ -161,14 +176,14 @@ void simple_lock(
info = &simple_locks_info[simple_locks_taken++];
info->l = l;
- /* XXX we want our return address, if possible */
-#if defined(__i386__)
- info->ra = *((unsigned long *)&l - 1);
-#endif /* i386 */
+ info->expr = expression;
+ info->loc = location;
}
-boolean_t simple_lock_try(
- simple_lock_t l)
+boolean_t _simple_lock_try(
+ simple_lock_t l,
+ const char *expression,
+ const char *location)
{
struct simple_locks_info *info;
@@ -179,10 +194,8 @@ boolean_t simple_lock_try(
info = &simple_locks_info[simple_locks_taken++];
info->l = l;
- /* XXX we want our return address, if possible */
-#if defined(__i386__)
- info->ra = *((unsigned long *)&l - 1);
-#endif /* i386 */
+ info->expr = expression;
+ info->loc = location;
return TRUE;
}
@@ -207,6 +220,7 @@ void simple_unlock(
simple_locks_info[i] = simple_locks_info[simple_locks_taken-1];
}
simple_locks_taken--;
+ simple_locks_info[simple_locks_taken] = (struct simple_locks_info) {0};
}
#endif /* MACH_SLOCKS && NCPUS == 1 */
@@ -250,9 +264,9 @@ void lock_sleepable(
*/
void lock_write(
- register lock_t l)
+ lock_t l)
{
- register int i;
+ int i;
check_simple_locks();
simple_lock(&l->interlock);
@@ -304,11 +318,14 @@ void lock_write(
simple_lock(&l->interlock);
}
}
+#if MACH_LDEBUG
+ l->writer = current_thread();
+#endif /* MACH_LDEBUG */
simple_unlock(&l->interlock);
}
void lock_done(
- register lock_t l)
+ lock_t l)
{
simple_lock(&l->interlock);
@@ -320,8 +337,12 @@ void lock_done(
else
if (l->want_upgrade)
l->want_upgrade = FALSE;
- else
+ else {
l->want_write = FALSE;
+#if MACH_LDEBUG
+ l->writer = THREAD_NULL;
+#endif /* MACH_LDEBUG */
+ }
/*
* There is no reason to wakeup a waiting thread
@@ -340,9 +361,9 @@ void lock_done(
}
void lock_read(
- register lock_t l)
+ lock_t l)
{
- register int i;
+ int i;
check_simple_locks();
simple_lock(&l->interlock);
@@ -387,9 +408,9 @@ void lock_read(
* Returns TRUE if the upgrade *failed*.
*/
boolean_t lock_read_to_write(
- register lock_t l)
+ lock_t l)
{
- register int i;
+ int i;
check_simple_locks();
simple_lock(&l->interlock);
@@ -438,14 +459,20 @@ boolean_t lock_read_to_write(
}
}
+#if MACH_LDEBUG
+ l->writer = current_thread();
+#endif /* MACH_LDEBUG */
simple_unlock(&l->interlock);
return FALSE;
}
void lock_write_to_read(
- register lock_t l)
+ lock_t l)
{
simple_lock(&l->interlock);
+#if MACH_LDEBUG
+ assert(l->writer == current_thread());
+#endif /* MACH_LDEBUG */
l->read_count++;
if (l->recursion_depth != 0)
@@ -461,6 +488,9 @@ void lock_write_to_read(
thread_wakeup(l);
}
+#if MACH_LDEBUG
+ l->writer = THREAD_NULL;
+#endif /* MACH_LDEBUG */
simple_unlock(&l->interlock);
}
@@ -474,7 +504,7 @@ void lock_write_to_read(
*/
boolean_t lock_try_write(
- register lock_t l)
+ lock_t l)
{
simple_lock(&l->interlock);
@@ -500,6 +530,9 @@ boolean_t lock_try_write(
*/
l->want_write = TRUE;
+#if MACH_LDEBUG
+ l->writer = current_thread();
+#endif /* MACH_LDEBUG */
simple_unlock(&l->interlock);
return TRUE;
}
@@ -513,7 +546,7 @@ boolean_t lock_try_write(
*/
boolean_t lock_try_read(
- register lock_t l)
+ lock_t l)
{
simple_lock(&l->interlock);
@@ -547,7 +580,7 @@ boolean_t lock_try_read(
* Returns FALSE if the upgrade *failed*.
*/
boolean_t lock_try_read_to_write(
- register lock_t l)
+ lock_t l)
{
check_simple_locks();
simple_lock(&l->interlock);
@@ -576,6 +609,9 @@ boolean_t lock_try_read_to_write(
simple_lock(&l->interlock);
}
+#if MACH_LDEBUG
+ l->writer = current_thread();
+#endif /* MACH_LDEBUG */
simple_unlock(&l->interlock);
return TRUE;
}
@@ -588,6 +624,10 @@ void lock_set_recursive(
lock_t l)
{
simple_lock(&l->interlock);
+#if MACH_LDEBUG
+ assert(l->writer == current_thread());
+#endif /* MACH_LDEBUG */
+
if (!l->want_write) {
panic("lock_set_recursive: don't have write lock");
}
@@ -620,13 +660,9 @@ void db_show_all_slocks(void)
for (i = 0; i < simple_locks_taken; i++) {
info = &simple_locks_info[i];
- db_printf("%d: ", i);
+ db_printf("%d: %s (", i, info->expr);
db_printsym(info->l, DB_STGY_ANY);
-#if defined(__i386__)
- db_printf(" locked by ");
- db_printsym(info->ra, DB_STGY_PROC);
-#endif
- db_printf("\n");
+ db_printf(") locked by %s\n", info->loc);
}
}
#else /* MACH_SLOCKS && NCPUS == 1 */
diff --git a/kern/lock.h b/kern/lock.h
index 4f38ea33..2781a48a 100644
--- a/kern/lock.h
+++ b/kern/lock.h
@@ -49,8 +49,15 @@
struct slock {
volatile natural_t lock_data; /* in general 1 bit is sufficient */
+ struct {} is_a_simple_lock;
};
+/*
+ * Used by macros to assert that the given argument is a simple
+ * lock.
+ */
+#define simple_lock_assert(l) (void) &(l)->is_a_simple_lock
+
typedef struct slock simple_lock_data_t;
typedef struct slock *simple_lock_t;
@@ -62,7 +69,8 @@ typedef struct slock *simple_lock_t;
#define decl_simple_lock_data(class,name) \
class simple_lock_data_t name;
-#define simple_lock_addr(lock) (&(lock))
+#define simple_lock_addr(lock) (simple_lock_assert(&(lock)), \
+ &(lock))
#if (NCPUS > 1)
@@ -70,8 +78,11 @@ class simple_lock_data_t name;
* The single-CPU debugging routines are not valid
* on a multiprocessor.
*/
-#define simple_lock_taken(lock) (1) /* always succeeds */
+#define simple_lock_taken(lock) (simple_lock_assert(lock), \
+ 1) /* always succeeds */
#define check_simple_locks()
+#define check_simple_locks_enable()
+#define check_simple_locks_disable()
#else /* NCPUS > 1 */
/*
@@ -79,14 +90,28 @@ class simple_lock_data_t name;
*/
extern void simple_lock_init(simple_lock_t);
-extern void simple_lock(simple_lock_t);
+extern void _simple_lock(simple_lock_t,
+ const char *, const char *);
extern void simple_unlock(simple_lock_t);
-extern boolean_t simple_lock_try(simple_lock_t);
+extern boolean_t _simple_lock_try(simple_lock_t,
+ const char *, const char *);
+
+/* We provide simple_lock and simple_lock_try so that we can save the
+ location. */
+#define XSTR(x) #x
+#define STR(x) XSTR(x)
+#define LOCATION __FILE__ ":" STR(__LINE__)
+
+#define simple_lock(lock) _simple_lock((lock), #lock, LOCATION)
+#define simple_lock_try(lock) _simple_lock_try((lock), #lock, LOCATION)
#define simple_lock_pause()
-#define simple_lock_taken(lock) ((lock)->lock_data)
+#define simple_lock_taken(lock) (simple_lock_assert(lock), \
+ (lock)->lock_data)
extern void check_simple_locks(void);
+extern void check_simple_locks_enable(void);
+extern void check_simple_locks_disable(void);
#endif /* NCPUS > 1 */
@@ -94,18 +119,25 @@ extern void check_simple_locks(void);
/*
* Do not allocate storage for locks if not needed.
*/
-#define decl_simple_lock_data(class,name)
-#define simple_lock_addr(lock) ((simple_lock_t)0)
+struct simple_lock_data_empty { struct {} is_a_simple_lock; };
+#define decl_simple_lock_data(class,name) \
+class struct simple_lock_data_empty name;
+#define simple_lock_addr(lock) (simple_lock_assert(&(lock)), \
+ (simple_lock_t)0)
/*
* No multiprocessor locking is necessary.
*/
-#define simple_lock_init(l)
-#define simple_lock(l)
-#define simple_unlock(l)
-#define simple_lock_try(l) (TRUE) /* always succeeds */
-#define simple_lock_taken(l) (1) /* always succeeds */
+#define simple_lock_init(l) simple_lock_assert(l)
+#define simple_lock(l) simple_lock_assert(l)
+#define simple_unlock(l) simple_lock_assert(l)
+#define simple_lock_try(l) (simple_lock_assert(l), \
+ TRUE) /* always succeeds */
+#define simple_lock_taken(l) (simple_lock_assert(l), \
+ 1) /* always succeeds */
#define check_simple_locks()
+#define check_simple_locks_enable()
+#define check_simple_locks_disable()
#define simple_lock_pause()
#endif /* MACH_SLOCKS */
@@ -142,6 +174,9 @@ struct lock {
/* boolean_t */ can_sleep:1, /* Can attempts to lock go to sleep? */
recursion_depth:12, /* Depth of recursion */
:0;
+#if MACH_LDEBUG
+ struct thread *writer;
+#endif /* MACH_LDEBUG */
decl_simple_lock_data(,interlock)
/* Hardware interlock field.
Last in the structure so that
@@ -171,6 +206,17 @@ extern boolean_t lock_try_read_to_write(lock_t);
extern void lock_set_recursive(lock_t);
extern void lock_clear_recursive(lock_t);
+/* Lock debugging support. */
+#if ! MACH_LDEBUG
+#define have_read_lock(l) 1
+#define have_write_lock(l) 1
+#else /* MACH_LDEBUG */
+/* XXX: We don't keep track of readers, so this is an approximation. */
+#define have_read_lock(l) ((l)->read_count > 0)
+#define have_write_lock(l) ((l)->writer == current_thread())
+#endif /* MACH_LDEBUG */
+#define have_lock(l) (have_read_lock(l) || have_write_lock(l))
+
void db_show_all_slocks(void);
#endif /* _KERN_LOCK_H_ */
diff --git a/kern/lock_mon.c b/kern/lock_mon.c
index 14504281..f6bbd5dd 100644
--- a/kern/lock_mon.c
+++ b/kern/lock_mon.c
@@ -64,7 +64,6 @@ typedef unsigned int time_stamp_t;
#define LOCK_INFO_HASH_COUNT 1024
#define LOCK_INFO_PER_BUCKET (LOCK_INFO_MAX/LOCK_INFO_HASH_COUNT)
-
#define HASH_LOCK(lock) ((long)lock>>5 & (LOCK_INFO_HASH_COUNT-1))
struct lock_info {
@@ -85,7 +84,7 @@ struct lock_info_bucket lock_info[LOCK_INFO_HASH_COUNT];
struct lock_info default_lock_info;
unsigned default_lock_stack = 0;
-extern int curr_ipl[];
+extern spl_t curr_ipl[];
@@ -94,8 +93,8 @@ locate_lock_info(lock)
decl_simple_lock_data(, **lock)
{
struct lock_info *li = &(lock_info[HASH_LOCK(*lock)].info[0]);
- register i;
- register my_cpu = cpu_number();
+ int i;
+ my_cpu = cpu_number();
for (i=0; i < LOCK_INFO_PER_BUCKET; i++, li++)
if (li->lock) {
@@ -112,11 +111,11 @@ decl_simple_lock_data(, **lock)
}
-simple_lock(lock)
+void simple_lock(lock)
decl_simple_lock_data(, *lock)
{
- register struct lock_info *li = locate_lock_info(&lock);
- register my_cpu = cpu_number();
+ struct lock_info *li = locate_lock_info(&lock);
+ my_cpu = cpu_number();
if (current_thread())
li->stack = current_thread()->lock_stack++;
@@ -131,11 +130,11 @@ decl_simple_lock_data(, *lock)
li->time = time_stamp - li->time;
}
-simple_lock_try(lock)
+int simple_lock_try(lock)
decl_simple_lock_data(, *lock)
{
- register struct lock_info *li = locate_lock_info(&lock);
- register my_cpu = cpu_number();
+ struct lock_info *li = locate_lock_info(&lock);
+ my_cpu = cpu_number();
if (curr_ipl[my_cpu])
li->masked++;
@@ -151,12 +150,12 @@ decl_simple_lock_data(, *lock)
}
}
-simple_unlock(lock)
+void simple_unlock(lock)
decl_simple_lock_data(, *lock)
{
- register time_stamp_t stamp = time_stamp;
- register time_stamp_t *time = &locate_lock_info(&lock)->time;
- register unsigned *lock_stack;
+ time_stamp_t stamp = time_stamp;
+ time_stamp_t *time = &locate_lock_info(&lock)->time;
+ unsigned *lock_stack;
*time = stamp - *time;
_simple_unlock(lock);
@@ -167,16 +166,13 @@ decl_simple_lock_data(, *lock)
}
}
-lip() {
+void lip(void) {
lis(4, 1, 0);
}
#define lock_info_sort lis
-unsigned scurval, ssum;
-struct lock_info *sli;
-
-lock_info_sort(arg, abs, count)
+void lock_info_sort(arg, abs, count)
{
struct lock_info *li, mean;
int bucket = 0;
@@ -215,9 +211,6 @@ lock_info_sort(arg, abs, count)
sum = li->success + li->fail;
if(!sum && !abs)
continue;
- scurval = curval;
- ssum = sum;
- sli = li;
if (!abs) switch(arg) {
case 0:
break;
@@ -257,7 +250,7 @@ lock_info_sort(arg, abs, count)
#define lock_info_clear lic
-lock_info_clear()
+void lock_info_clear(void)
{
struct lock_info *li;
int bucket = 0;
@@ -271,7 +264,7 @@ lock_info_clear()
memset(&default_lock_info, 0, sizeof(struct lock_info));
}
-print_lock_info(li)
+void print_lock_info(li)
struct lock_info *li;
{
int off;
@@ -299,11 +292,11 @@ struct lock_info *li;
* Measure lock/unlock operations
*/
-time_lock(loops)
+void time_lock(int loops)
{
decl_simple_lock_data(, lock)
- register time_stamp_t stamp;
- register int i;
+ time_stamp_t stamp;
+ int i;
if (!loops)
@@ -340,7 +333,7 @@ void
retry_simple_lock(lock)
decl_simple_lock_data(, *lock)
{
- register count = 0;
+ count = 0;
while(!simple_lock_try(lock))
if (count++ > 1000000 && lock != &kdb_lock) {
@@ -356,7 +349,7 @@ decl_simple_lock_data(, *lock)
void
retry_bit_lock(index, addr)
{
- register count = 0;
+ count = 0;
while(!bit_lock_try(index, addr))
if (count++ > 1000000) {
diff --git a/kern/log2.h b/kern/log2.h
new file mode 100644
index 00000000..0e67701c
--- /dev/null
+++ b/kern/log2.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2014 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * Integer base 2 logarithm operations.
+ */
+
+#ifndef _KERN_LOG2_H
+#define _KERN_LOG2_H
+
+#include <kern/assert.h>
+
+#ifdef __LP64__
+#define LONG_BIT 64
+#else /* __LP64__ */
+#define LONG_BIT 32
+#endif /* __LP64__ */
+
+static inline unsigned int
+ilog2(unsigned long x)
+{
+ assert(x != 0);
+ return LONG_BIT - __builtin_clzl(x) - 1;
+}
+
+static inline unsigned int
+iorder2(unsigned long size)
+{
+ assert(size != 0);
+
+ if (size == 1)
+ return 0;
+
+ return ilog2(size - 1) + 1;
+}
+
+#endif /* _KERN_LOG2_H */
diff --git a/kern/mach_clock.c b/kern/mach_clock.c
index edf87f07..1817ce22 100644
--- a/kern/mach_clock.c
+++ b/kern/mach_clock.c
@@ -27,7 +27,7 @@
* the rights to redistribute these changes.
*/
/*
- * File: clock_prim.c
+ * File: mach_clock.c
* Author: Avadis Tevanian, Jr.
* Date: 1986
*
@@ -54,6 +54,7 @@
#include <kern/thread.h>
#include <kern/time_stamp.h>
#include <kern/timer.h>
+#include <kern/priority.h>
#include <vm/vm_kern.h>
#include <sys/time.h>
#include <machine/mach_param.h> /* HZ */
@@ -64,8 +65,6 @@
#include <kern/pc_sample.h>
#endif
-void softclock(); /* forward */
-
int hz = HZ; /* number of ticks per second */
int tick = (1000000 / HZ); /* number of usec per tick */
time_value_t time = { 0, 0 }; /* time since bootup (uncorrected) */
@@ -86,26 +85,36 @@ int bigadj = 1000000; /* adjust 10*tickadj if adjustment
* This update protocol, with a check value, allows
* do {
* secs = mtime->seconds;
+ * __sync_synchronize();
* usecs = mtime->microseconds;
+ * __sync_synchronize();
* } while (secs != mtime->check_seconds);
- * to read the time correctly. (On a multiprocessor this assumes
- * that processors see each other's writes in the correct order.
- * We have to insert write fence operations.) FIXME
+ * to read the time correctly.
*/
-mapped_time_value_t *mtime = 0;
+volatile mapped_time_value_t *mtime = 0;
#define update_mapped_time(time) \
MACRO_BEGIN \
if (mtime != 0) { \
mtime->check_seconds = (time)->seconds; \
- asm volatile("":::"memory"); \
+ __sync_synchronize(); \
mtime->microseconds = (time)->microseconds; \
- asm volatile("":::"memory"); \
+ __sync_synchronize(); \
mtime->seconds = (time)->seconds; \
} \
MACRO_END
+#define read_mapped_time(time) \
+MACRO_BEGIN \
+ do { \
+ time->seconds = mtime->seconds; \
+ __sync_synchronize(); \
+ time->microseconds = mtime->microseconds; \
+ __sync_synchronize(); \
+ } while (time->seconds != mtime->check_seconds); \
+MACRO_END
+
decl_simple_lock_data(, timer_lock) /* lock for ... */
timer_elt_data_t timer_head; /* ordered list of timeouts */
/* (doubles as end-of-list) */
@@ -121,13 +130,13 @@ timer_elt_data_t timer_head; /* ordered list of timeouts */
* the accuracy of the hardware clock.
*
*/
-void clock_interrupt(usec, usermode, basepri)
- register int usec; /* microseconds per tick */
- boolean_t usermode; /* executing user code */
- boolean_t basepri; /* at base priority */
+void clock_interrupt(
+ int usec, /* microseconds per tick */
+ boolean_t usermode, /* executing user code */
+ boolean_t basepri) /* at base priority */
{
- register int my_cpu = cpu_number();
- register thread_t thread = current_thread();
+ int my_cpu = cpu_number();
+ thread_t thread = current_thread();
counter(c_clock_ticks++);
counter(c_threads_total += c_threads_current);
@@ -150,8 +159,7 @@ void clock_interrupt(usec, usermode, basepri)
* Increment the CPU time statistics.
*/
{
- extern void thread_quantum_update(); /* in priority.c */
- register int state;
+ int state;
if (usermode)
state = CPU_STATE_USER;
@@ -187,8 +195,8 @@ void clock_interrupt(usec, usermode, basepri)
*/
if (my_cpu == master_cpu) {
- register spl_t s;
- register timer_elt_t telt;
+ spl_t s;
+ timer_elt_t telt;
boolean_t needsoft = FALSE;
#if TS_FORMAT == 1
@@ -221,11 +229,19 @@ void clock_interrupt(usec, usermode, basepri)
time_value_add_usec(&time, usec);
}
else {
- register int delta;
+ int delta;
if (timedelta < 0) {
- delta = usec - tickdelta;
- timedelta += tickdelta;
+ if (usec > tickdelta) {
+ delta = usec - tickdelta;
+ timedelta += tickdelta;
+ } else {
+ /* Not enough time has passed, defer overflowing
+ * correction for later, keep only one microsecond
+ * delta */
+ delta = 1;
+ timedelta += usec - 1;
+ }
}
else {
delta = usec + tickdelta;
@@ -236,7 +252,7 @@ void clock_interrupt(usec, usermode, basepri)
update_mapped_time(&time);
/*
- * Schedule soft-interupt for timeout if needed
+ * Schedule soft-interrupt for timeout if needed
*/
if (needsoft) {
if (basepri) {
@@ -272,15 +288,15 @@ void clock_interrupt(usec, usermode, basepri)
* and corrupts it.
*/
-void softclock()
+void softclock(void)
{
/*
* Handle timeouts.
*/
spl_t s;
- register timer_elt_t telt;
- register void (*fcn)( void * param );
- register void *param;
+ timer_elt_t telt;
+ void (*fcn)( void * param );
+ void *param;
while (TRUE) {
s = splsched();
@@ -311,12 +327,12 @@ void softclock()
* telt timer element. Function and param are already set.
* interval time-out interval, in hz.
*/
-void set_timeout(telt, interval)
- register timer_elt_t telt; /* already loaded */
- register unsigned int interval;
+void set_timeout(
+ timer_elt_t telt, /* already loaded */
+ unsigned int interval)
{
spl_t s;
- register timer_elt_t next;
+ timer_elt_t next;
s = splsched();
simple_lock(&timer_lock);
@@ -341,8 +357,7 @@ void set_timeout(telt, interval)
splx(s);
}
-boolean_t reset_timeout(telt)
- register timer_elt_t telt;
+boolean_t reset_timeout(timer_elt_t telt)
{
spl_t s;
@@ -362,7 +377,7 @@ boolean_t reset_timeout(telt)
}
}
-void init_timeout()
+void init_timeout(void)
{
simple_lock_init(&timer_lock);
queue_init(&timer_head.chain);
@@ -370,17 +385,47 @@ void init_timeout()
elapsed_ticks = 0;
}
+
+/*
+ * We record timestamps using the boot-time clock. We keep track of
+ * the boot-time clock by storing the difference to the real-time
+ * clock.
+ */
+struct time_value clock_boottime_offset;
+
+/*
+ * Update the offset of the boot-time clock from the real-time clock.
+ * This function must be called when the real-time clock is updated.
+ * This function must be called at SPLHIGH.
+ */
+void
+clock_boottime_update(struct time_value *new_time)
+{
+ struct time_value delta = time;
+ time_value_sub(&delta, new_time);
+ time_value_add(&clock_boottime_offset, &delta);
+}
/*
- * Record a timestamp in STAMP.
+ * Record a timestamp in STAMP. Records values in the boot-time clock
+ * frame.
*/
void
record_time_stamp (time_value_t *stamp)
{
- do {
- stamp->seconds = mtime->seconds;
- stamp->microseconds = mtime->microseconds;
- } while (stamp->seconds != mtime->check_seconds);
+ read_mapped_time(stamp);
+ time_value_add(stamp, &clock_boottime_offset);
+}
+
+/*
+ * Read a timestamp in STAMP into RESULT. Returns values in the
+ * real-time clock frame.
+ */
+void
+read_time_stamp (time_value_t *stamp, time_value_t *result)
+{
+ *result = *stamp;
+ time_value_sub(result, &clock_boottime_offset);
}
@@ -389,17 +434,13 @@ record_time_stamp (time_value_t *stamp)
*/
kern_return_t
host_get_time(host, current_time)
- host_t host;
+ const host_t host;
time_value_t *current_time; /* OUT */
{
if (host == HOST_NULL)
return(KERN_INVALID_HOST);
- do {
- current_time->seconds = mtime->seconds;
- current_time->microseconds = mtime->microseconds;
- } while (current_time->seconds != mtime->check_seconds);
-
+ read_mapped_time(current_time);
return (KERN_SUCCESS);
}
@@ -408,7 +449,7 @@ host_get_time(host, current_time)
*/
kern_return_t
host_set_time(host, new_time)
- host_t host;
+ const host_t host;
time_value_t new_time;
{
spl_t s;
@@ -426,6 +467,7 @@ host_set_time(host, new_time)
#endif /* NCPUS > 1 */
s = splhigh();
+ clock_boottime_update(&new_time);
time = new_time;
update_mapped_time(&time);
resettodr();
@@ -446,7 +488,7 @@ host_set_time(host, new_time)
*/
kern_return_t
host_adjust_time(host, new_adjustment, old_adjustment)
- host_t host;
+ const host_t host;
time_value_t new_adjustment;
time_value_t *old_adjustment; /* OUT */
{
@@ -492,22 +534,22 @@ host_adjust_time(host, new_adjustment, old_adjustment)
return (KERN_SUCCESS);
}
-void mapable_time_init()
+void mapable_time_init(void)
{
if (kmem_alloc_wired(kernel_map, (vm_offset_t *) &mtime, PAGE_SIZE)
!= KERN_SUCCESS)
panic("mapable_time_init");
- memset(mtime, 0, PAGE_SIZE);
+ memset((void *) mtime, 0, PAGE_SIZE);
update_mapped_time(&time);
}
-int timeopen()
+int timeopen(dev_t dev, int flag, io_req_t ior)
{
return(0);
}
-int timeclose()
+void timeclose(dev_t dev, int flag)
{
- return(0);
+ return;
}
/*
@@ -528,13 +570,13 @@ timer_elt_data_t timeout_timers[NTIMERS];
* param: parameter to pass to function
* interval: timeout interval, in hz.
*/
-void timeout(fcn, param, interval)
- void (*fcn)( void * param );
- void * param;
- int interval;
+void timeout(
+ void (*fcn)(void *param),
+ void * param,
+ int interval)
{
spl_t s;
- register timer_elt_t elt;
+ timer_elt_t elt;
s = splsched();
simple_lock(&timer_lock);
@@ -557,11 +599,11 @@ void timeout(fcn, param, interval)
* and removed.
*/
boolean_t untimeout(fcn, param)
- register void (*fcn)( void * param );
- register void * param;
+ void (*fcn)( void * param );
+ const void * param;
{
spl_t s;
- register timer_elt_t elt;
+ timer_elt_t elt;
s = splsched();
simple_lock(&timer_lock);
diff --git a/kern/mach_clock.h b/kern/mach_clock.h
index 4e4e8ff1..1af0cdae 100644
--- a/kern/mach_clock.h
+++ b/kern/mach_clock.h
@@ -29,6 +29,10 @@
#include <mach/time_value.h>
#include <kern/host.h>
#include <kern/queue.h>
+#include <sys/types.h>
+
+struct io_req;
+typedef struct io_req *io_req_t;
/* Timers in kernel. */
@@ -82,9 +86,18 @@ extern boolean_t reset_timeout(timer_elt_t telt);
extern void init_timeout (void);
-/* Read the current time into STAMP. */
+/*
+ * Record a timestamp in STAMP. Records values in the boot-time clock
+ * frame.
+ */
extern void record_time_stamp (time_value_t *stamp);
+/*
+ * Read a timestamp in STAMP into RESULT. Returns values in the
+ * real-time clock frame.
+ */
+extern void read_time_stamp (time_value_t *stamp, time_value_t *result);
+
extern kern_return_t host_get_time(
host_t host,
time_value_t *current_time);
@@ -102,6 +115,9 @@ extern void mapable_time_init (void);
/* For public timer elements. */
extern void timeout(timer_func_t *fcn, void *param, int interval);
-extern boolean_t untimeout(timer_func_t *fcn, void *param);
+extern boolean_t untimeout(timer_func_t *fcn, const void *param);
+
+extern int timeopen(dev_t dev, int flag, io_req_t ior);
+extern void timeclose(dev_t dev, int flag);
#endif /* _KERN_MACH_CLOCK_H_ */
diff --git a/kern/mach_factor.c b/kern/mach_factor.c
index 558c4a06..debce0b0 100644
--- a/kern/mach_factor.c
+++ b/kern/mach_factor.c
@@ -55,13 +55,13 @@ static long fract[3] = {
void compute_mach_factor(void)
{
- register processor_set_t pset;
- register processor_t processor;
- register int ncpus;
- register int nthreads;
- register long factor_now;
- register long average_now;
- register long load_now;
+ processor_set_t pset;
+ processor_t processor;
+ int ncpus;
+ int nthreads;
+ long factor_now;
+ long average_now;
+ long load_now;
simple_lock(&all_psets_lock);
pset = (processor_set_t) queue_first(&all_psets);
@@ -123,7 +123,7 @@ void compute_mach_factor(void)
* And some ugly stuff to keep w happy.
*/
if (pset == &default_pset) {
- register int i;
+ int i;
for (i = 0; i < 3; i++) {
mach_factor[i] = ( (mach_factor[i]*fract[i])
diff --git a/kern/machine.c b/kern/machine.c
index c2a19b99..3f7a7f7f 100644
--- a/kern/machine.c
+++ b/kern/machine.c
@@ -72,12 +72,11 @@ decl_simple_lock_data(,action_lock);
* Flag specified cpu as up and running. Called when a processor comes
* online.
*/
-void cpu_up(cpu)
- int cpu;
+void cpu_up(int cpu)
{
- register struct machine_slot *ms;
- register processor_t processor;
- register spl_t s;
+ struct machine_slot *ms;
+ processor_t processor;
+ spl_t s;
processor = cpu_to_processor(cpu);
pset_lock(&default_pset);
@@ -102,12 +101,11 @@ void cpu_up(cpu)
* Flag specified cpu as down. Called when a processor is about to
* go offline.
*/
-void cpu_down(cpu)
- int cpu;
+void cpu_down(int cpu)
{
- register struct machine_slot *ms;
- register processor_t processor;
- register spl_t s;
+ struct machine_slot *ms;
+ processor_t processor;
+ spl_t s;
s = splsched();
processor = cpu_to_processor(cpu);
@@ -126,8 +124,8 @@ void cpu_down(cpu)
kern_return_t
host_reboot(host, options)
- host_t host;
- int options;
+ const host_t host;
+ int options;
{
if (host == HOST_NULL)
return (KERN_INVALID_HOST);
@@ -153,11 +151,11 @@ host_reboot(host, options)
* a reference.
*/
void
-processor_request_action(processor, new_pset)
-processor_t processor;
-processor_set_t new_pset;
+processor_request_action(
+ processor_t processor,
+ processor_set_t new_pset)
{
- register processor_set_t pset;
+ processor_set_t pset;
/*
* Processor must be in a processor set. Must lock its idle lock to
@@ -228,10 +226,10 @@ processor_set_t new_pset;
* Synchronizes with assignment completion if wait is TRUE.
*/
kern_return_t
-processor_assign(processor, new_pset, wait)
-processor_t processor;
-processor_set_t new_pset;
-boolean_t wait;
+processor_assign(
+ processor_t processor,
+ processor_set_t new_pset,
+ boolean_t wait)
{
spl_t s;
@@ -272,7 +270,7 @@ Retry:
assert_wait((event_t) processor, TRUE);
processor_unlock(processor);
splx(s);
- thread_block((void(*)()) 0);
+ thread_block(thread_no_continuation);
goto Retry;
}
@@ -301,7 +299,7 @@ Retry:
assert_wait((event_t)processor, TRUE);
processor_unlock(processor);
splx(s);
- thread_block((void (*)()) 0);
+ thread_block(thread_no_continuation);
s = splsched();
processor_lock(processor);
}
@@ -315,14 +313,11 @@ Retry:
#else /* MACH_HOST */
kern_return_t
-processor_assign(processor, new_pset, wait)
-processor_t processor;
-processor_set_t new_pset;
-boolean_t wait;
+processor_assign(
+ processor_t processor,
+ processor_set_t new_pset,
+ boolean_t wait)
{
-#ifdef lint
- processor++; new_pset++; wait++;
-#endif
return KERN_FAILURE;
}
@@ -334,8 +329,7 @@ boolean_t wait;
* with the shutdown (can be called from interrupt level).
*/
kern_return_t
-processor_shutdown(processor)
-processor_t processor;
+processor_shutdown(processor_t processor)
{
spl_t s;
@@ -364,12 +358,10 @@ processor_t processor;
/*
* action_thread() shuts down processors or changes their assignment.
*/
-void processor_doaction(); /* forward */
-
-void action_thread_continue()
+void action_thread_continue(void)
{
- register processor_t processor;
- register spl_t s;
+ processor_t processor;
+ spl_t s;
while (TRUE) {
s = splsched();
@@ -395,7 +387,7 @@ void action_thread_continue()
}
}
-void action_thread()
+void __attribute__((noreturn)) action_thread(void)
{
action_thread_continue();
/*NOTREACHED*/
@@ -406,21 +398,15 @@ void action_thread()
* is to schedule ourselves onto a cpu and then save our
* context back into the runqs before taking out the cpu.
*/
-#ifdef __GNUC__
-__volatile__
-#endif
-void processor_doshutdown(); /* forward */
-
-void processor_doaction(processor)
-register processor_t processor;
+void processor_doaction(processor_t processor)
{
thread_t this_thread;
spl_t s;
- register processor_set_t pset;
+ processor_set_t pset;
#if MACH_HOST
- register processor_set_t new_pset;
- register thread_t thread;
- register thread_t prev_thread = THREAD_NULL;
+ processor_set_t new_pset;
+ thread_t thread;
+ thread_t prev_thread = THREAD_NULL;
boolean_t have_pset_ref = FALSE;
#endif /* MACH_HOST */
@@ -429,7 +415,7 @@ register processor_t processor;
*/
this_thread = current_thread();
thread_bind(this_thread, processor);
- thread_block((void (*)()) 0);
+ thread_block(thread_no_continuation);
pset = processor->processor_set;
#if MACH_HOST
@@ -586,7 +572,7 @@ Restart_pset:
thread_deallocate(prev_thread);
thread_bind(this_thread, PROCESSOR_NULL);
- thread_block((void (*)()) 0);
+ thread_block(thread_no_continuation);
return;
}
@@ -633,13 +619,10 @@ Restart_pset:
* running on the processor's shutdown stack.
*/
-#ifdef __GNUC__
-__volatile__
-#endif
void processor_doshutdown(processor)
-register processor_t processor;
+processor_t processor;
{
- register int cpu = processor->slot_num;
+ int cpu = processor->slot_num;
timer_switch(&kernel_timer[cpu]);
@@ -663,23 +646,20 @@ register processor_t processor;
#else /* NCPUS > 1 */
kern_return_t
-processor_assign(processor, new_pset, wait)
-processor_t processor;
-processor_set_t new_pset;
-boolean_t wait;
+processor_assign(
+ processor_t processor,
+ processor_set_t new_pset,
+ boolean_t wait)
{
-#ifdef lint
- processor++; new_pset++; wait++;
-#endif /* lint */
return(KERN_FAILURE);
}
#endif /* NCPUS > 1 */
kern_return_t
-host_get_boot_info(priv_host, boot_info)
- host_t priv_host;
- kernel_boot_info_t boot_info;
+host_get_boot_info(
+ host_t priv_host,
+ kernel_boot_info_t boot_info)
{
char *src = "";
diff --git a/kern/machine.h b/kern/machine.h
index af2b7e91..c67213a2 100644
--- a/kern/machine.h
+++ b/kern/machine.h
@@ -53,6 +53,6 @@ extern kern_return_t processor_shutdown (processor_t);
/*
* action_thread() shuts down processors or changes their assignment.
*/
-extern void action_thread_continue (void);
+extern void action_thread_continue (void) __attribute__((noreturn));
#endif /* _MACHINE_H_ */
diff --git a/kern/macro_help.h b/kern/macro_help.h
deleted file mode 100644
index a3d156b7..00000000
--- a/kern/macro_help.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Mach Operating System
- * Copyright (c) 1991,1990,1989,1988 Carnegie Mellon University
- * All Rights Reserved.
- *
- * Permission to use, copy, modify and distribute this software and its
- * documentation is hereby granted, provided that both the copyright
- * notice and this permission notice appear in all copies of the
- * software, derivative works or modified versions, and any portions
- * thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
- * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
- * Carnegie Mellon requests users of this software to return to
- *
- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
- * School of Computer Science
- * Carnegie Mellon University
- * Pittsburgh PA 15213-3890
- *
- * any improvements or extensions that they make and grant Carnegie Mellon
- * the rights to redistribute these changes.
- */
-/*
- * File: kern/macro_help.h
- *
- * Provide help in making lint-free macro routines
- *
- */
-
-#ifndef _KERN_MACRO_HELP_H_
-#define _KERN_MACRO_HELP_H_
-
-#if !defined(MACRO_BEGIN)
-
-#include <mach/boolean.h>
-
-#ifdef lint
-boolean_t NEVER;
-boolean_t ALWAYS;
-#else /* lint */
-#define NEVER FALSE
-#define ALWAYS TRUE
-#endif /* lint */
-
-#define MACRO_BEGIN ({
-#define MACRO_END })
-
-#define MACRO_RETURN if (ALWAYS) return
-
-#endif /* !MACRO_BEGIN */
-
-#endif /* _KERN_MACRO_HELP_H_ */
diff --git a/kern/macros.h b/kern/macros.h
new file mode 100644
index 00000000..c2e8545a
--- /dev/null
+++ b/kern/macros.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2009, 2010, 2013 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * Helper macros.
+ */
+
+#ifndef _KERN_MACROS_H
+#define _KERN_MACROS_H
+
+#define MACRO_BEGIN ({
+#define MACRO_END })
+#define MACRO_RETURN if (1) return
+
+#define __QUOTE(x) #x
+#define QUOTE(x) __QUOTE(x)
+
+#ifdef __ASSEMBLER__
+#define DECL_CONST(x, s) x
+#else /* __ASSEMBLER__ */
+#define __DECL_CONST(x, s) x##s
+#define DECL_CONST(x, s) __DECL_CONST(x, s)
+#endif /* __ASSEMBLER__ */
+
+#define STRLEN(x) (sizeof(x) - 1)
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+#define DIV_CEIL(n, d) (((n) + (d) - 1) / (d))
+
+#define P2ALIGNED(x, a) (((x) & ((a) - 1)) == 0)
+#define ISP2(x) P2ALIGNED(x, x)
+#define P2ALIGN(x, a) ((x) & -(a))
+#define P2ROUND(x, a) (-(-(x) & -(a)))
+#define P2END(x, a) (-(~(x) & -(a)))
+
+#define structof(ptr, type, member) \
+ ((type *)((char *)(ptr) - offsetof(type, member)))
+
+#define access_once(x) (*(volatile typeof(x) *)&(x))
+
+#define alignof(x) __alignof__(x)
+
+#ifndef likely
+#define likely(expr) __builtin_expect(!!(expr), 1)
+#endif /* likely */
+#ifndef unlikely
+#define unlikely(expr) __builtin_expect(!!(expr), 0)
+#endif /* unlikely */
+
+#ifndef barrier
+#define barrier() asm volatile("" : : : "memory")
+#endif /* barrier */
+
+#define __noreturn __attribute__((noreturn))
+#define __aligned(x) __attribute__((aligned(x)))
+#define __always_inline inline __attribute__((always_inline))
+#ifndef __section
+#define __section(x) __attribute__((section(x)))
+#endif /* __section */
+#define __packed __attribute__((packed))
+#define __alias(x) __attribute__((alias(x)))
+
+#define __format_printf(fmt, args) \
+ __attribute__((format(printf, fmt, args)))
+
+#endif /* _KERN_MACROS_H */
diff --git a/kern/pc_sample.c b/kern/pc_sample.c
index 57002581..fcb9d71b 100644
--- a/kern/pc_sample.c
+++ b/kern/pc_sample.c
@@ -43,12 +43,9 @@
typedef sampled_pc_t sampled_pcs[MAX_PC_SAMPLES];
-int pc_sampling_enabled = 0;
-decl_simple_lock_data(, pc_sampling_lock) /* lock for enabling */
-
void take_pc_sample(
- register thread_t t,
- register sample_control_t *cp,
+ const thread_t t,
+ sample_control_t *cp,
sampled_pc_flavor_t flavor)
{
vm_offset_t pc;
@@ -192,6 +189,9 @@ get_sampled_pcs(
(sampled_pc_array_t)cp->buffer,
(seqidx2 + 1) * sizeof(sampled_pc_t));
}
+ } else if (nsamples < 0) {
+ /* Bogus SEQNO supplied. */
+ nsamples = 0;
} else {
/* could either be zero because of overflow, or because
* we are being lied to. In either case, return nothing.
@@ -244,8 +244,8 @@ task_get_sampled_pcs(
kern_return_t
thread_enable_pc_sampling(
- thread_t thread,
- int *tickp,
+ const thread_t thread,
+ const int *tickp,
sampled_pc_flavor_t flavors)
{
return KERN_FAILURE; /* not implemented */
@@ -253,8 +253,8 @@ thread_enable_pc_sampling(
kern_return_t
task_enable_pc_sampling(
- task_t task,
- int *tickp,
+ const task_t task,
+ const int *tickp,
sampled_pc_flavor_t flavors)
{
return KERN_FAILURE; /* not implemented */
@@ -262,36 +262,36 @@ task_enable_pc_sampling(
kern_return_t
thread_disable_pc_sampling(
- thread_t thread,
- int *samplecntp)
+ const thread_t thread,
+ const int *samplecntp)
{
return KERN_FAILURE; /* not implemented */
}
kern_return_t
task_disable_pc_sampling(
- task_t task,
- int *samplecntp)
+ const task_t task,
+ const int *samplecntp)
{
return KERN_FAILURE; /* not implemented */
}
kern_return_t
thread_get_sampled_pcs(
- thread_t thread,
- sampled_pc_seqno_t *seqnop,
- sampled_pc_array_t sampled_pcs_out,
- int *sampled_pcs_cntp)
+ const thread_t thread,
+ const sampled_pc_seqno_t *seqnop,
+ const sampled_pc_array_t sampled_pcs_out,
+ const int *sampled_pcs_cntp)
{
return KERN_FAILURE; /* not implemented */
}
kern_return_t
task_get_sampled_pcs(
- task_t task,
- sampled_pc_seqno_t *seqnop,
- sampled_pc_array_t sampled_pcs_out,
- int *sampled_pcs_cntp)
+ const task_t task,
+ const sampled_pc_seqno_t *seqnop,
+ const sampled_pc_array_t sampled_pcs_out,
+ const int *sampled_pcs_cntp)
{
return KERN_FAILURE; /* not implemented */
}
diff --git a/kern/pc_sample.h b/kern/pc_sample.h
index 3c64068d..4832cb9f 100644
--- a/kern/pc_sample.h
+++ b/kern/pc_sample.h
@@ -49,7 +49,7 @@
#include <mach/pc_sample.h>
#include <mach/machine/vm_types.h>
#include <kern/kern_types.h>
-#include <kern/macro_help.h>
+#include <kern/macros.h>
/*
* Control structure for sampling, included in
diff --git a/kern/printf.c b/kern/printf.c
index a3a771d0..50f23623 100644
--- a/kern/printf.c
+++ b/kern/printf.c
@@ -126,17 +126,17 @@
#define isdigit(d) ((d) >= '0' && (d) <= '9')
#define Ctod(c) ((c) - '0')
-#define MAXBUF (sizeof(long int) * 8) /* enough for binary */
+#define MAXBUF (sizeof(long long int) * 8) /* enough for binary */
void printnum(
- register unsigned long u,
- register int base,
+ unsigned long long u,
+ int base,
void (*putc)( char, vm_offset_t ),
vm_offset_t putc_arg)
{
char buf[MAXBUF]; /* build number here */
- register char * p = &buf[MAXBUF-1];
+ char * p = &buf[MAXBUF-1];
static char digs[] = "0123456789abcdef";
do {
@@ -151,23 +151,8 @@ void printnum(
boolean_t _doprnt_truncates = FALSE;
-/* printf could be called at _any_ point during system initialization,
- including before printf_init() gets called from the "normal" place
- in kern/startup.c. */
-boolean_t _doprnt_lock_initialized = FALSE;
-decl_simple_lock_data(,_doprnt_lock)
-
-void printf_init(void)
-{
- if (!_doprnt_lock_initialized)
- {
- _doprnt_lock_initialized = TRUE;
- simple_lock_init(&_doprnt_lock);
- }
-}
-
void _doprnt(
- register const char *fmt,
+ const char *fmt,
va_list argp,
/* character output routine */
void (*putc)( char, vm_offset_t),
@@ -178,29 +163,14 @@ void _doprnt(
int prec;
boolean_t ladjust;
char padc;
- long n;
- unsigned long u;
+ long long n;
+ unsigned long long u;
+ int have_long_long;
int plus_sign;
int sign_char;
boolean_t altfmt, truncate;
int base;
- register char c;
-
- printf_init();
-
-#if 0
- /* Make sure that we get *some* printout, no matter what */
- simple_lock(&_doprnt_lock);
-#else
- {
- register int i = 0;
- while (i < 1*1024*1024) {
- if (simple_lock_try(&_doprnt_lock))
- break;
- i++;
- }
- }
-#endif
+ char c;
while ((c = *fmt) != '\0') {
if (c != '%') {
@@ -218,6 +188,7 @@ void _doprnt(
plus_sign = 0;
sign_char = 0;
altfmt = FALSE;
+ have_long_long = FALSE;
while (TRUE) {
c = *fmt;
@@ -276,6 +247,10 @@ void _doprnt(
if (c == 'l')
c = *++fmt; /* need it if sizeof(int) < sizeof(long) */
+ if (c == 'l') {
+ c = *++fmt; /* handle `long long' */
+ have_long_long = TRUE;
+ }
truncate = FALSE;
@@ -283,11 +258,14 @@ void _doprnt(
case 'b':
case 'B':
{
- register char *p;
- boolean_t any;
- register int i;
-
- u = va_arg(argp, unsigned long);
+ char *p;
+ boolean_t any;
+ int i;
+
+ if (! have_long_long)
+ u = va_arg(argp, unsigned long);
+ else
+ u = va_arg(argp, unsigned long long);
p = va_arg(argp, char *);
base = *p++;
printnum(u, base, putc, putc_arg);
@@ -302,7 +280,7 @@ void _doprnt(
/*
* Bit field
*/
- register int j;
+ int j;
if (any)
(*putc)(',', putc_arg);
else {
@@ -342,8 +320,8 @@ void _doprnt(
case 's':
{
- register char *p;
- register char *p2;
+ char *p;
+ char *p2;
if (prec == -1)
prec = 0x7fffffff; /* MAXINT */
@@ -431,7 +409,10 @@ void _doprnt(
goto print_unsigned;
print_signed:
- n = va_arg(argp, long);
+ if (! have_long_long)
+ n = va_arg(argp, long);
+ else
+ n = va_arg(argp, long long);
if (n >= 0) {
u = n;
sign_char = plus_sign;
@@ -443,13 +424,16 @@ void _doprnt(
goto print_num;
print_unsigned:
- u = va_arg(argp, unsigned long);
+ if (! have_long_long)
+ u = va_arg(argp, unsigned long);
+ else
+ u = va_arg(argp, unsigned long long);
goto print_num;
print_num:
{
char buf[MAXBUF]; /* build number here */
- register char * p = &buf[MAXBUF-1];
+ char * p = &buf[MAXBUF-1];
static char digits[] = "0123456789abcdef";
char *prefix = 0;
@@ -507,8 +491,6 @@ void _doprnt(
}
fmt++;
}
-
- simple_unlock(&_doprnt_lock);
}
/*
@@ -540,7 +522,7 @@ int indent = 0;
void iprintf(const char *fmt, ...)
{
va_list listp;
- register int i;
+ int i;
for (i = indent; i > 0; ){
if (i >= 8) {
@@ -567,8 +549,8 @@ sputc(
char c,
vm_offset_t arg)
{
- register char **bufp = (char **) arg;
- register char *p = *bufp;
+ char **bufp = (char **) arg;
+ char *p = *bufp;
*p++ = c;
*bufp = p;
}
@@ -615,13 +597,23 @@ vsnprintf(char *buf, size_t size, const char *fmt, va_list args)
return cookie.index;
}
+int
+snprintf(char *buf, size_t size, const char *fmt, ...)
+{
+ int written;
+ va_list listp;
+ va_start(listp, fmt);
+ written = vsnprintf(buf, size, fmt, listp);
+ va_end(listp);
+ return written;
+}
-void safe_gets(str, maxlen)
- char *str;
- int maxlen;
+void safe_gets(
+ char *str,
+ int maxlen)
{
- register char *lp;
- register int c;
+ char *lp;
+ int c;
char *strmax = str + maxlen - 1; /* allow space for trailing 0 */
lp = str;
diff --git a/kern/printf.h b/kern/printf.h
index 8b4e7606..b72640aa 100644
--- a/kern/printf.h
+++ b/kern/printf.h
@@ -27,22 +27,26 @@
#include <sys/types.h>
#include <stdarg.h>
-extern void printf_init (void);
-
extern void _doprnt (const char *fmt,
va_list argp,
void (*putc)(char, vm_offset_t),
int radix,
vm_offset_t putc_arg);
-extern void printnum (unsigned long u, int base,
+extern void printnum (unsigned long long u, int base,
void (*putc)(char, vm_offset_t),
vm_offset_t putc_arg);
-extern int sprintf (char *buf, const char *fmt, ...);
-extern int vsnprintf (char *buf, size_t size, const char *fmt, va_list args);
+extern int sprintf (char *buf, const char *fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+extern int snprintf (char *buf, size_t size, const char *fmt, ...)
+ __attribute__ ((format (printf, 3, 4)));
+extern int vsnprintf (char *buf, size_t size, const char *fmt, va_list args)
+ __attribute__ ((format (printf, 3, 0)));
+
-extern int printf (const char *fmt, ...);
+extern int printf (const char *fmt, ...)
+ __attribute__ ((format (printf, 1, 2)));
#define printf_once(fmt, ...) \
MACRO_BEGIN \
diff --git a/kern/priority.c b/kern/priority.c
index 17541b8b..587ea2f9 100644
--- a/kern/priority.c
+++ b/kern/priority.c
@@ -27,7 +27,7 @@
* the rights to redistribute these changes.
*/
/*
- * File: clock_prim.c
+ * File: priority.c
* Author: Avadis Tevanian, Jr.
* Date: 1986
*
@@ -74,16 +74,16 @@
* Called only from clock_interrupt().
*/
-void thread_quantum_update(mycpu, thread, nticks, state)
- register int mycpu;
- register thread_t thread;
- int nticks;
- int state;
+void thread_quantum_update(
+ int mycpu,
+ thread_t thread,
+ int nticks,
+ int state)
{
- register int quantum;
- register processor_t myprocessor;
+ int quantum;
+ processor_t myprocessor;
#if NCPUS > 1
- register processor_set_t pset;
+ processor_set_t pset;
#endif
spl_t s;
diff --git a/kern/priority.h b/kern/priority.h
new file mode 100644
index 00000000..2da93ebe
--- /dev/null
+++ b/kern/priority.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _KERN_PRIORITY_H_
+#define _KERN_PRIORITY_H_
+
+extern void thread_quantum_update(
+ int mycpu,
+ thread_t thread,
+ int nticks,
+ int state);
+
+#endif /* _KERN_PRIORITY_H_ */
diff --git a/kern/processor.c b/kern/processor.c
index 19868609..0a88469b 100644
--- a/kern/processor.c
+++ b/kern/processor.c
@@ -66,18 +66,11 @@ processor_t master_processor;
processor_t processor_ptr[NCPUS];
/*
- * Forward declarations.
- */
-void quantum_set(processor_set_t);
-void pset_init(processor_set_t);
-void processor_init(processor_t, int);
-
-/*
* Bootstrap the processor/pset system so the scheduler can run.
*/
void pset_sys_bootstrap(void)
{
- register int i;
+ int i;
pset_init(&default_pset);
default_pset.empty = FALSE;
@@ -109,14 +102,14 @@ void pset_sys_bootstrap(void)
*/
void pset_sys_init(void)
{
- register int i;
- register processor_t processor;
+ int i;
+ processor_t processor;
/*
* Allocate the cache for processor sets.
*/
kmem_cache_init(&pset_cache, "processor_set",
- sizeof(struct processor_set), 0, NULL, NULL, NULL, 0);
+ sizeof(struct processor_set), 0, NULL, 0);
/*
* Give each processor a control port.
@@ -138,7 +131,7 @@ void pset_sys_init(void)
*/
void pset_init(
- register processor_set_t pset)
+ processor_set_t pset)
{
int i;
@@ -189,7 +182,7 @@ void pset_init(
*/
void processor_init(
- register processor_t pr,
+ processor_t pr,
int slot_num)
{
int i;
@@ -414,14 +407,14 @@ void pset_reference(
kern_return_t
processor_info(
- register processor_t processor,
+ processor_t processor,
int flavor,
host_t *host,
processor_info_t info,
natural_t *count)
{
- register int slot_num, state;
- register processor_basic_info_t basic_info;
+ int slot_num, state;
+ processor_basic_info_t basic_info;
if (processor == PROCESSOR_NULL)
return KERN_INVALID_ARGUMENT;
@@ -503,7 +496,7 @@ void quantum_set(
processor_set_t pset)
{
#if NCPUS > 1
- register int i,ncpus;
+ int i, ncpus;
ncpus = pset->processor_count;
@@ -567,8 +560,8 @@ processor_set_create(
kern_return_t processor_set_destroy(
processor_set_t pset)
{
- register queue_entry_t elem;
- register queue_head_t *list;
+ queue_entry_t elem;
+ queue_head_t *list;
if (pset == PROCESSOR_SET_NULL || pset == &default_pset)
return KERN_INVALID_ARGUMENT;
@@ -647,18 +640,12 @@ processor_set_create(
processor_set_t *new_set,
processor_set_t *new_name)
{
-#ifdef lint
- host++; new_set++; new_name++;
-#endif /* lint */
return KERN_FAILURE;
}
kern_return_t processor_set_destroy(
processor_set_t pset)
{
-#ifdef lint
- pset++;
-#endif /* lint */
return KERN_FAILURE;
}
@@ -670,6 +657,8 @@ processor_get_assignment(
processor_set_t *pset)
{
int state;
+ if (processor == PROCESSOR_NULL)
+ return KERN_INVALID_ARGUMENT;
state = processor->state;
if (state == PROCESSOR_SHUTDOWN || state == PROCESSOR_OFF_LINE)
@@ -692,7 +681,7 @@ processor_set_info(
return KERN_INVALID_ARGUMENT;
if (flavor == PROCESSOR_SET_BASIC_INFO) {
- register processor_set_basic_info_t basic_info;
+ processor_set_basic_info_t basic_info;
if (*count < PROCESSOR_SET_BASIC_INFO_COUNT)
return KERN_FAILURE;
@@ -712,7 +701,7 @@ processor_set_info(
return KERN_SUCCESS;
}
else if (flavor == PROCESSOR_SET_SCHED_INFO) {
- register processor_set_sched_info_t sched_info;
+ processor_set_sched_info_t sched_info;
if (*count < PROCESSOR_SET_SCHED_INFO_COUNT)
return KERN_FAILURE;
@@ -757,8 +746,8 @@ processor_set_max_priority(
pset->max_priority = max_priority;
if (change_threads) {
- register queue_head_t *list;
- register thread_t thread;
+ queue_head_t *list;
+ thread_t thread;
list = &pset->threads;
queue_iterate(list, thread, thread_t, pset_threads) {
@@ -828,8 +817,8 @@ processor_set_policy_disable(
pset->policies &= ~policy;
if (change_threads) {
- register queue_head_t *list;
- register thread_t thread;
+ queue_head_t *list;
+ thread_t thread;
list = &pset->threads;
queue_iterate(list, thread, thread_t, pset_threads) {
diff --git a/kern/processor.h b/kern/processor.h
index 9a6c944b..b81526c0 100644
--- a/kern/processor.h
+++ b/kern/processor.h
@@ -320,4 +320,10 @@ extern kern_return_t processor_set_threads(
natural_t *count);
#endif
+void processor_doaction(processor_t processor);
+void processor_doshutdown(processor_t processor);
+void quantum_set(processor_set_t pset);
+void pset_init(processor_set_t pset);
+void processor_init(processor_t pr, int slot_num);
+
#endif /* _KERN_PROCESSOR_H_ */
diff --git a/kern/profile.c b/kern/profile.c
index e14d4116..1381b1a5 100644
--- a/kern/profile.c
+++ b/kern/profile.c
@@ -172,7 +172,7 @@ printf("profile_thread: mach_msg failed returned %x\n",(int)mr);
sizeof(struct buf_to_send));
}
- thread_halt_self();
+ thread_halt_self(thread_exception_return);
}
@@ -194,7 +194,7 @@ thread_t th;
* Make a request to the profile_thread by inserting
* the buffer in the send queue, and wake it up.
* The last buffer must be inserted at the head of the
- * send queue, so the profile_thread handles it immediatly.
+ * send queue, so the profile_thread handles it immediately.
*/
if (kmem_alloc( kernel_map, &vm_buf_entry,
sizeof(struct buf_to_send)) != KERN_SUCCESS)
@@ -213,7 +213,7 @@ thread_t th;
thread_wakeup((event_t) profile_thread);
assert_wait((event_t) &buf_entry->wakeme, TRUE);
splx(s);
- thread_block((void (*)()) 0);
+ thread_block(thread_no_continuation);
} else {
splx(s);
kmem_free(kernel_map, vm_buf_entry, sizeof(struct buf_to_send));
diff --git a/kern/queue.c b/kern/queue.c
index 98b74c20..d9396e54 100644
--- a/kern/queue.c
+++ b/kern/queue.c
@@ -35,8 +35,8 @@
* Insert element at head of queue.
*/
void enqueue_head(
- register queue_t que,
- register queue_entry_t elt)
+ queue_t que,
+ queue_entry_t elt)
{
elt->next = que->next;
elt->prev = que;
@@ -48,8 +48,8 @@ void enqueue_head(
* Insert element at tail of queue.
*/
void enqueue_tail(
- register queue_t que,
- register queue_entry_t elt)
+ queue_t que,
+ queue_entry_t elt)
{
elt->next = que;
elt->prev = que->prev;
@@ -61,9 +61,9 @@ void enqueue_tail(
* Remove and return element at head of queue.
*/
queue_entry_t dequeue_head(
- register queue_t que)
+ queue_t que)
{
- register queue_entry_t elt;
+ queue_entry_t elt;
if (que->next == que)
return((queue_entry_t)0);
@@ -78,9 +78,9 @@ queue_entry_t dequeue_head(
* Remove and return element at tail of queue.
*/
queue_entry_t dequeue_tail(
- register queue_t que)
+ queue_t que)
{
- register queue_entry_t elt;
+ queue_entry_t elt;
if (que->prev == que)
return((queue_entry_t)0);
@@ -100,7 +100,7 @@ queue_entry_t dequeue_tail(
/*ARGSUSED*/
void remqueue(
queue_t que,
- register queue_entry_t elt)
+ queue_entry_t elt)
{
elt->next->prev = elt->prev;
elt->prev->next = elt->next;
@@ -111,8 +111,8 @@ void remqueue(
* package.
*/
void insque(
- register struct queue_entry *entry,
- register struct queue_entry *pred)
+ struct queue_entry *entry,
+ struct queue_entry *pred)
{
entry->next = pred->next;
entry->prev = pred;
@@ -122,7 +122,7 @@ void insque(
struct queue_entry
*remque(
- register struct queue_entry *elt)
+ struct queue_entry *elt)
{
(elt->next)->prev = elt->prev;
(elt->prev)->next = elt->next;
diff --git a/kern/queue.h b/kern/queue.h
index 1846922a..f0b4002f 100644
--- a/kern/queue.h
+++ b/kern/queue.h
@@ -87,6 +87,14 @@ void remqueue(queue_t, queue_entry_t);
void insque(queue_entry_t, queue_entry_t);
/*
+ * Macro: queue_assert
+ * Function:
+ * Used by macros to assert that the given argument is a
+ * queue.
+ */
+#define queue_assert(q) (void) ((void) (q)->next, (q)->prev)
+
+/*
* Macro: queue_init
* Function:
* Initialize the given queue.
@@ -104,7 +112,7 @@ void insque(queue_entry_t, queue_entry_t);
* queue_entry_t queue_first(q)
* queue_t q; *IN*
*/
-#define queue_first(q) ((q)->next)
+#define queue_first(q) (queue_assert(q), (q)->next)
/*
* Macro: queue_next
@@ -114,7 +122,7 @@ void insque(queue_entry_t, queue_entry_t);
* queue_entry_t queue_next(qc)
* queue_t qc;
*/
-#define queue_next(qc) ((qc)->next)
+#define queue_next(qc) (queue_assert(qc), (qc)->next)
/*
* Macro: queue_last
@@ -124,7 +132,7 @@ void insque(queue_entry_t, queue_entry_t);
* queue_entry_t queue_last(q)
* queue_t q; *IN*
*/
-#define queue_last(q) ((q)->prev)
+#define queue_last(q) (queue_assert(q), (q)->prev)
/*
* Macro: queue_prev
@@ -134,7 +142,7 @@ void insque(queue_entry_t, queue_entry_t);
* queue_entry_t queue_prev(qc)
* queue_t qc;
*/
-#define queue_prev(qc) ((qc)->prev)
+#define queue_prev(qc) (queue_assert(qc), (qc)->prev)
/*
* Macro: queue_end
@@ -146,7 +154,8 @@ void insque(queue_entry_t, queue_entry_t);
* queue_t q;
* queue_entry_t qe;
*/
-#define queue_end(q, qe) ((q) == (qe))
+#define queue_end(q, qe) (queue_assert(q), queue_assert(qe), \
+ (q) == (qe))
/*
* Macro: queue_empty
@@ -179,7 +188,9 @@ void insque(queue_entry_t, queue_entry_t);
*/
#define queue_enter(head, elt, type, field) \
{ \
- register queue_entry_t prev; \
+ queue_assert(head); \
+ queue_assert(&(elt)->field); \
+ queue_entry_t prev; \
\
prev = (head)->prev; \
if ((head) == prev) { \
@@ -206,7 +217,9 @@ void insque(queue_entry_t, queue_entry_t);
*/
#define queue_enter_first(head, elt, type, field) \
{ \
- register queue_entry_t next; \
+ queue_assert(head); \
+ queue_assert(&(elt)->field); \
+ queue_entry_t next; \
\
next = (head)->next; \
if ((head) == next) { \
@@ -239,7 +252,9 @@ void insque(queue_entry_t, queue_entry_t);
*/
#define queue_remove(head, elt, type, field) \
{ \
- register queue_entry_t next, prev; \
+ queue_assert(head); \
+ queue_assert(&(elt)->field); \
+ queue_entry_t next, prev; \
\
next = (elt)->field.next; \
prev = (elt)->field.prev; \
@@ -266,7 +281,9 @@ void insque(queue_entry_t, queue_entry_t);
*/
#define queue_remove_first(head, entry, type, field) \
{ \
- register queue_entry_t next; \
+ queue_assert(head); \
+ queue_assert(&(entry)->field); \
+ queue_entry_t next; \
\
(entry) = (type) ((head)->next); \
next = (entry)->field.next; \
@@ -289,7 +306,9 @@ void insque(queue_entry_t, queue_entry_t);
*/
#define queue_remove_last(head, entry, type, field) \
{ \
- register queue_entry_t prev; \
+ queue_assert(head); \
+ queue_assert(&(entry)->field); \
+ queue_entry_t prev; \
\
(entry) = (type) ((head)->prev); \
prev = (entry)->field.prev; \
@@ -306,6 +325,8 @@ void insque(queue_entry_t, queue_entry_t);
*/
#define queue_assign(to, from, type, field) \
{ \
+ queue_assert(&(to)->field); \
+ queue_assert(&(from)->field); \
((type)((from)->prev))->field.next = (to); \
((type)((from)->next))->field.prev = (to); \
*to = *from; \
diff --git a/kern/rbtree.h b/kern/rbtree.h
index 5a65d1ef..16ef2736 100644
--- a/kern/rbtree.h
+++ b/kern/rbtree.h
@@ -31,13 +31,9 @@
#include <stddef.h>
#include <kern/assert.h>
-#include <kern/macro_help.h>
-#include <kern/rbtree.h>
+#include <kern/macros.h>
#include <sys/types.h>
-#define structof(ptr, type, member) \
- ((type *)((char *)ptr - offsetof(type, member)))
-
/*
* Indexes of the left and right nodes in the children array of a node.
*/
@@ -178,8 +174,8 @@ MACRO_END
* This macro performs a standard lookup to obtain the insertion point of
* the given node in the tree (it is assumed that the inserted node never
* compares equal to any other entry in the tree) and links the node. It
- * then It then checks red-black rules violations, and rebalances the tree
- * if necessary.
+ * then checks red-black rules violations, and rebalances the tree if
+ * necessary.
*
* Unlike rbtree_lookup(), the cmp_fn parameter must compare two complete
* entries, so it is suggested to use two different comparison inline
diff --git a/kern/rdxtree.c b/kern/rdxtree.c
new file mode 100644
index 00000000..a23d6e7e
--- /dev/null
+++ b/kern/rdxtree.c
@@ -0,0 +1,830 @@
+/*
+ * Copyright (c) 2011-2015 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Upstream site with license notes:
+ * http://git.sceen.net/rbraun/librbraun.git/
+ */
+
+#include <kern/assert.h>
+#include <kern/slab.h>
+#include <mach/kern_return.h>
+#include <stddef.h>
+#include <string.h>
+
+#include "macros.h"
+#include "rdxtree.h"
+#include "rdxtree_i.h"
+
+/* XXX */
+#define CHAR_BIT 8U
+#define ERR_SUCCESS KERN_SUCCESS
+#define ERR_BUSY KERN_INVALID_ARGUMENT
+#define ERR_NOMEM KERN_RESOURCE_SHORTAGE
+
+/*
+ * Mask applied on an entry to obtain its address.
+ */
+#define RDXTREE_ENTRY_ADDR_MASK (~0x3UL)
+
+/*
+ * Global properties used to shape radix trees.
+ */
+#define RDXTREE_RADIX 6
+#define RDXTREE_RADIX_SIZE (1UL << RDXTREE_RADIX)
+#define RDXTREE_RADIX_MASK (RDXTREE_RADIX_SIZE - 1)
+
+#if RDXTREE_RADIX < 6
+typedef unsigned long rdxtree_bm_t;
+#define rdxtree_ffs(x) __builtin_ffsl(x)
+#elif RDXTREE_RADIX == 6 /* RDXTREE_RADIX < 6 */
+typedef unsigned long long rdxtree_bm_t;
+#define rdxtree_ffs(x) __builtin_ffsll(x)
+#else /* RDXTREE_RADIX < 6 */
+#error "radix too high"
+#endif /* RDXTREE_RADIX < 6 */
+
+/*
+ * Allocation bitmap size in bits.
+ */
+#define RDXTREE_BM_SIZE (sizeof(rdxtree_bm_t) * CHAR_BIT)
+
+/*
+ * Empty/full allocation bitmap words.
+ */
+#define RDXTREE_BM_EMPTY ((rdxtree_bm_t)0)
+#define RDXTREE_BM_FULL \
+ ((~(rdxtree_bm_t)0) >> (RDXTREE_BM_SIZE - RDXTREE_RADIX_SIZE))
+
+/*
+ * These macros can be replaced by actual functions in an environment
+ * that provides lockless synchronization such as RCU.
+ */
+#define llsync_assign_ptr(ptr, value) ((ptr) = (value))
+#define llsync_read_ptr(ptr) (ptr)
+
+/*
+ * Radix tree node.
+ *
+ * The height of a tree is the number of nodes to traverse until stored
+ * pointers are reached. A height of 0 means the entries of a node (or the
+ * tree root) directly point to stored pointers.
+ *
+ * The index is valid if and only if the parent isn't NULL.
+ *
+ * Concerning the allocation bitmap, a bit is set when the node it denotes,
+ * or one of its children, can be used to allocate an entry. Conversely, a bit
+ * is clear when the matching node and all of its children have no free entry.
+ *
+ * In order to support safe lockless lookups, in particular during a resize,
+ * each node includes the height of its subtree, which is invariant during
+ * the entire node lifetime. Since the tree height does vary, it can't be
+ * used to determine whether the tree root is a node or a stored pointer.
+ * This implementation assumes that all nodes and stored pointers are at least
+ * 4-byte aligned, and uses the least significant bit of entries to indicate
+ * the pointer type. This bit is set for internal nodes, and clear for stored
+ * pointers so that they can be accessed from slots without conversion.
+ */
+struct rdxtree_node {
+ struct rdxtree_node *parent;
+ unsigned int index;
+ unsigned int height;
+ unsigned int nr_entries;
+ rdxtree_bm_t alloc_bm;
+ void *entries[RDXTREE_RADIX_SIZE];
+};
+
+/*
+ * We allocate nodes using the slab allocator.
+ */
+static struct kmem_cache rdxtree_node_cache;
+
+void
+rdxtree_cache_init(void)
+{
+ kmem_cache_init(&rdxtree_node_cache, "rdxtree_node",
+ sizeof(struct rdxtree_node), 0, NULL, 0);
+}
+
+#ifdef RDXTREE_ENABLE_NODE_CREATION_FAILURES
+unsigned int rdxtree_fail_node_creation_threshold;
+unsigned int rdxtree_nr_node_creations;
+#endif /* RDXTREE_ENABLE_NODE_CREATION_FAILURES */
+
+static inline int
+rdxtree_check_alignment(const void *ptr)
+{
+ return ((unsigned long)ptr & ~RDXTREE_ENTRY_ADDR_MASK) == 0;
+}
+
+static inline void *
+rdxtree_entry_addr(void *entry)
+{
+ return (void *)((unsigned long)entry & RDXTREE_ENTRY_ADDR_MASK);
+}
+
+static inline int
+rdxtree_entry_is_node(const void *entry)
+{
+ return ((unsigned long)entry & 1) != 0;
+}
+
+static inline void *
+rdxtree_node_to_entry(struct rdxtree_node *node)
+{
+ return (void *)((unsigned long)node | 1);
+}
+
+static int
+rdxtree_node_create(struct rdxtree_node **nodep, unsigned int height)
+{
+ struct rdxtree_node *node;
+
+#ifdef RDXTREE_ENABLE_NODE_CREATION_FAILURES
+ if (rdxtree_fail_node_creation_threshold != 0) {
+ rdxtree_nr_node_creations++;
+
+ if (rdxtree_nr_node_creations == rdxtree_fail_node_creation_threshold)
+ return ERR_NOMEM;
+ }
+#endif /* RDXTREE_ENABLE_NODE_CREATION_FAILURES */
+
+ node = (struct rdxtree_node *) kmem_cache_alloc(&rdxtree_node_cache);
+
+ if (node == NULL)
+ return ERR_NOMEM;
+
+ assert(rdxtree_check_alignment(node));
+ node->parent = NULL;
+ node->height = height;
+ node->nr_entries = 0;
+ node->alloc_bm = RDXTREE_BM_FULL;
+ memset(node->entries, 0, sizeof(node->entries));
+ *nodep = node;
+ return 0;
+}
+
+static void
+rdxtree_node_schedule_destruction(struct rdxtree_node *node)
+{
+ /*
+ * This function is intended to use the appropriate interface to defer
+ * destruction until all read-side references are dropped in an
+ * environment that provides lockless synchronization.
+ *
+ * Otherwise, it simply "schedules" destruction immediately.
+ */
+ kmem_cache_free(&rdxtree_node_cache, (vm_offset_t) node);
+}
+
+static inline void
+rdxtree_node_link(struct rdxtree_node *node, struct rdxtree_node *parent,
+ unsigned int index)
+{
+ node->parent = parent;
+ node->index = index;
+}
+
+static inline void
+rdxtree_node_unlink(struct rdxtree_node *node)
+{
+ assert(node->parent != NULL);
+ node->parent = NULL;
+}
+
+static inline int
+rdxtree_node_full(struct rdxtree_node *node)
+{
+ return (node->nr_entries == ARRAY_SIZE(node->entries));
+}
+
+static inline int
+rdxtree_node_empty(struct rdxtree_node *node)
+{
+ return (node->nr_entries == 0);
+}
+
+static inline void
+rdxtree_node_insert(struct rdxtree_node *node, unsigned int index,
+ void *entry)
+{
+ assert(index < ARRAY_SIZE(node->entries));
+ assert(node->entries[index] == NULL);
+
+ node->nr_entries++;
+ llsync_assign_ptr(node->entries[index], entry);
+}
+
+static inline void
+rdxtree_node_insert_node(struct rdxtree_node *node, unsigned int index,
+ struct rdxtree_node *child)
+{
+ rdxtree_node_insert(node, index, rdxtree_node_to_entry(child));
+}
+
+static inline void
+rdxtree_node_remove(struct rdxtree_node *node, unsigned int index)
+{
+ assert(index < ARRAY_SIZE(node->entries));
+ assert(node->entries[index] != NULL);
+
+ node->nr_entries--;
+ llsync_assign_ptr(node->entries[index], NULL);
+}
+
+static inline void *
+rdxtree_node_find(struct rdxtree_node *node, unsigned int *indexp)
+{
+ unsigned int index;
+ void *ptr;
+
+ index = *indexp;
+
+ while (index < ARRAY_SIZE(node->entries)) {
+ ptr = rdxtree_entry_addr(llsync_read_ptr(node->entries[index]));
+
+ if (ptr != NULL) {
+ *indexp = index;
+ return ptr;
+ }
+
+ index++;
+ }
+
+ return NULL;
+}
+
+/*
+ * Mark a slot as free in the node's allocation bitmap
+ * (a set bit means "some key is available in this subtree").
+ */
+static inline void
+rdxtree_node_bm_set(struct rdxtree_node *node, unsigned int index)
+{
+ node->alloc_bm |= (rdxtree_bm_t)1 << index;
+}
+
+/*
+ * Mark a slot as having no free key in its subtree.
+ */
+static inline void
+rdxtree_node_bm_clear(struct rdxtree_node *node, unsigned int index)
+{
+ node->alloc_bm &= ~((rdxtree_bm_t)1 << index);
+}
+
+/*
+ * Return nonzero if the given slot still advertises a free key.
+ */
+static inline int
+rdxtree_node_bm_is_set(struct rdxtree_node *node, unsigned int index)
+{
+ return (node->alloc_bm & ((rdxtree_bm_t)1 << index));
+}
+
+/*
+ * Return true if no slot of the node advertises a free key,
+ * i.e. the whole subtree is fully allocated.
+ */
+static inline int
+rdxtree_node_bm_empty(struct rdxtree_node *node)
+{
+ return (node->alloc_bm == RDXTREE_BM_EMPTY);
+}
+
+/*
+ * Return the index of the first slot with a free key, or
+ * (unsigned int)-1 if the bitmap is empty (rdxtree_ffs() returns 0).
+ */
+static inline unsigned int
+rdxtree_node_bm_first(struct rdxtree_node *node)
+{
+ return rdxtree_ffs(node->alloc_bm) - 1;
+}
+
+/*
+ * Return the largest key a tree of the given height can store.
+ *
+ * Guards against shifting by the full width of rdxtree_key_t
+ * (which would be undefined behavior) by saturating to the
+ * maximum key value.
+ */
+static inline rdxtree_key_t
+rdxtree_max_key(unsigned int height)
+{
+ size_t shift;
+
+ shift = RDXTREE_RADIX * height;
+
+ if (likely(shift < (sizeof(rdxtree_key_t) * CHAR_BIT)))
+ return ((rdxtree_key_t)1 << shift) - 1;
+ else
+ return ~((rdxtree_key_t)0);
+}
+
+/*
+ * Reduce the height of the tree while the root node holds exactly one
+ * entry in slot 0 (the only slot that can be occupied after a shrink,
+ * since higher slots would require a taller tree).
+ *
+ * Each replaced root is handed to the deferred-destruction path so
+ * concurrent lockless readers can finish with it.
+ */
+static void
+rdxtree_shrink(struct rdxtree *tree)
+{
+ struct rdxtree_node *node;
+ void *entry;
+
+ while (tree->height > 0) {
+ node = rdxtree_entry_addr(tree->root);
+
+ if (node->nr_entries != 1)
+ break;
+
+ entry = node->entries[0];
+
+ if (entry == NULL)
+ break;
+
+ tree->height--;
+
+ /* If the new root is still an internal node, detach it
+ from the node being discarded. */
+ if (tree->height > 0)
+ rdxtree_node_unlink(rdxtree_entry_addr(entry));
+
+ llsync_assign_ptr(tree->root, entry);
+ rdxtree_node_schedule_destruction(node);
+ }
+}
+
+static int
+rdxtree_grow(struct rdxtree *tree, rdxtree_key_t key)
+{
+ struct rdxtree_node *root, *node;
+ unsigned int new_height;
+ int error;
+
+ new_height = tree->height + 1;
+
+ while (key > rdxtree_max_key(new_height))
+ new_height++;
+
+ if (tree->root == NULL) {
+ tree->height = new_height;
+ return ERR_SUCCESS;
+ }
+
+ root = rdxtree_entry_addr(tree->root);
+
+ do {
+ error = rdxtree_node_create(&node, tree->height);
+
+ if (error) {
+ rdxtree_shrink(tree);
+ return error;
+ }
+
+ if (tree->height == 0)
+ rdxtree_node_bm_clear(node, 0);
+ else {
+ rdxtree_node_link(root, node, 0);
+
+ if (rdxtree_node_bm_empty(root))
+ rdxtree_node_bm_clear(node, 0);
+ }
+
+ rdxtree_node_insert(node, 0, tree->root);
+ tree->height++;
+ llsync_assign_ptr(tree->root, rdxtree_node_to_entry(node));
+ root = node;
+ } while (new_height > tree->height);
+
+ return ERR_SUCCESS;
+}
+
+/*
+ * Release empty nodes, walking bottom-up from the given node.
+ *
+ * Climbing stops at the first non-empty node; if that node is the
+ * root, the tree is given a chance to shrink. An empty root empties
+ * the whole tree. Freed nodes go through the deferred-destruction
+ * path for the benefit of concurrent lockless readers.
+ */
+static void
+rdxtree_cleanup(struct rdxtree *tree, struct rdxtree_node *node)
+{
+ struct rdxtree_node *prev;
+
+ for (;;) {
+ if (likely(!rdxtree_node_empty(node))) {
+ if (unlikely(node->parent == NULL))
+ rdxtree_shrink(tree);
+
+ break;
+ }
+
+ if (node->parent == NULL) {
+ tree->height = 0;
+ llsync_assign_ptr(tree->root, NULL);
+ rdxtree_node_schedule_destruction(node);
+ break;
+ }
+
+ prev = node;
+ node = node->parent;
+ rdxtree_node_unlink(prev);
+ rdxtree_node_remove(node, prev->index);
+ rdxtree_node_schedule_destruction(prev);
+ }
+}
+
+/*
+ * After an insertion, clear the "free key" bit for the slot, and
+ * propagate the clearing upward for every ancestor whose subtree
+ * has just become full.
+ */
+static void
+rdxtree_insert_bm_clear(struct rdxtree_node *node, unsigned int index)
+{
+ for (;;) {
+ rdxtree_node_bm_clear(node, index);
+
+ if (!rdxtree_node_full(node) || (node->parent == NULL))
+ break;
+
+ index = node->index;
+ node = node->parent;
+ }
+}
+
+/*
+ * Insert ptr in tree at key, creating intermediate nodes as needed.
+ *
+ * Returns ERR_SUCCESS on success, ERR_BUSY if the key is already in
+ * use, or the error from node allocation. If slotp isn't NULL, the
+ * slot of the newly inserted pointer is stored there on success.
+ */
+int
+rdxtree_insert_common(struct rdxtree *tree, rdxtree_key_t key,
+ void *ptr, void ***slotp)
+{
+ struct rdxtree_node *node, *prev;
+ /* index is always assigned in the loop before being read; it is
+ zero-initialized (rather than the "index = index" idiom, which
+ reads an indeterminate value) to keep the code well defined
+ while silencing maybe-uninitialized warnings. */
+ unsigned int height, shift, index = 0;
+ int error;
+
+ assert(ptr != NULL);
+ assert(rdxtree_check_alignment(ptr));
+
+ /* Make the tree tall enough for the requested key. */
+ if (unlikely(key > rdxtree_max_key(tree->height))) {
+ error = rdxtree_grow(tree, key);
+
+ if (error)
+ return error;
+ }
+
+ height = tree->height;
+
+ /* Height 0: the root slot itself stores the single pointer. */
+ if (unlikely(height == 0)) {
+ if (tree->root != NULL)
+ return ERR_BUSY;
+
+ llsync_assign_ptr(tree->root, ptr);
+
+ if (slotp != NULL)
+ *slotp = &tree->root;
+
+ return ERR_SUCCESS;
+ }
+
+ node = rdxtree_entry_addr(tree->root);
+ shift = (height - 1) * RDXTREE_RADIX;
+ prev = NULL;
+
+ do {
+ if (node == NULL) {
+ error = rdxtree_node_create(&node, height - 1);
+
+ if (error) {
+ /* Undo any partial path built so far. */
+ if (prev == NULL)
+ tree->height = 0;
+ else
+ rdxtree_cleanup(tree, prev);
+
+ return error;
+ }
+
+ if (prev == NULL)
+ llsync_assign_ptr(tree->root, rdxtree_node_to_entry(node));
+ else {
+ rdxtree_node_link(node, prev, index);
+ rdxtree_node_insert_node(prev, index, node);
+ }
+ }
+
+ prev = node;
+ index = (unsigned int)(key >> shift) & RDXTREE_RADIX_MASK;
+ node = rdxtree_entry_addr(prev->entries[index]);
+ shift -= RDXTREE_RADIX;
+ height--;
+ } while (height > 0);
+
+ if (unlikely(node != NULL))
+ return ERR_BUSY;
+
+ rdxtree_node_insert(prev, index, ptr);
+ rdxtree_insert_bm_clear(prev, index);
+
+ if (slotp != NULL)
+ *slotp = &prev->entries[index];
+
+ return ERR_SUCCESS;
+}
+
+/*
+ * Insert ptr in tree, allocating the lowest free key for it.
+ *
+ * The allocation bitmaps guide the descent toward the first free
+ * slot; if no free key exists at the current height, the tree is
+ * grown via rdxtree_insert_common(). On success, the new key is
+ * stored in *keyp and, if slotp isn't NULL, the slot of the inserted
+ * pointer is stored in *slotp.
+ */
+int
+rdxtree_insert_alloc_common(struct rdxtree *tree, void *ptr,
+ rdxtree_key_t *keyp, void ***slotp)
+{
+ struct rdxtree_node *node, *prev;
+ /* index is always assigned in the loop before being read; it is
+ zero-initialized (rather than the "index = index" idiom, which
+ reads an indeterminate value) to keep the code well defined
+ while silencing maybe-uninitialized warnings. */
+ unsigned int height, shift, index = 0;
+ rdxtree_key_t key;
+ int error;
+
+ assert(ptr != NULL);
+ assert(rdxtree_check_alignment(ptr));
+
+ height = tree->height;
+
+ /* Height 0: key 0 is the only key; use it if free. */
+ if (unlikely(height == 0)) {
+ if (tree->root == NULL) {
+ llsync_assign_ptr(tree->root, ptr);
+ *keyp = 0;
+
+ if (slotp != NULL)
+ *slotp = &tree->root;
+
+ return ERR_SUCCESS;
+ }
+
+ goto grow;
+ }
+
+ node = rdxtree_entry_addr(tree->root);
+ key = 0;
+ shift = (height - 1) * RDXTREE_RADIX;
+ prev = NULL;
+
+ do {
+ if (node == NULL) {
+ error = rdxtree_node_create(&node, height - 1);
+
+ if (error) {
+ rdxtree_cleanup(tree, prev);
+ return error;
+ }
+
+ rdxtree_node_link(node, prev, index);
+ rdxtree_node_insert_node(prev, index, node);
+ }
+
+ prev = node;
+ /* Follow the first subtree advertising a free key. */
+ index = rdxtree_node_bm_first(node);
+
+ if (index == (unsigned int)-1)
+ goto grow;
+
+ key |= (rdxtree_key_t)index << shift;
+ node = rdxtree_entry_addr(node->entries[index]);
+ shift -= RDXTREE_RADIX;
+ height--;
+ } while (height > 0);
+
+ rdxtree_node_insert(prev, index, ptr);
+ rdxtree_insert_bm_clear(prev, index);
+
+ if (slotp != NULL)
+ *slotp = &prev->entries[index];
+
+ goto out;
+
+grow:
+ /* The current height is full: the first free key is the one
+ just past the current maximum. */
+ key = rdxtree_max_key(height) + 1;
+ error = rdxtree_insert_common(tree, key, ptr, slotp);
+
+ if (error)
+ return error;
+
+out:
+ *keyp = key;
+ return ERR_SUCCESS;
+}
+
+/*
+ * After a removal, mark the slot as free again and propagate the
+ * "free key available" bit upward. Propagation stops at the first
+ * ancestor that already advertised a free key.
+ */
+static void
+rdxtree_remove_bm_set(struct rdxtree_node *node, unsigned int index)
+{
+ do {
+ rdxtree_node_bm_set(node, index);
+
+ if (node->parent == NULL)
+ break;
+
+ index = node->index;
+ node = node->parent;
+ } while (!rdxtree_node_bm_is_set(node, index));
+}
+
+/*
+ * Remove the pointer stored at key.
+ *
+ * Returns the removed pointer, or NULL if the key is out of range or
+ * maps to no entry. Allocation bitmaps are updated and empty nodes
+ * are released via rdxtree_cleanup().
+ */
+void *
+rdxtree_remove(struct rdxtree *tree, rdxtree_key_t key)
+{
+ struct rdxtree_node *node, *prev;
+ unsigned int height, shift, index;
+
+ height = tree->height;
+
+ if (unlikely(key > rdxtree_max_key(height)))
+ return NULL;
+
+ node = rdxtree_entry_addr(tree->root);
+
+ /* Height 0: the root slot is the entry itself. */
+ if (unlikely(height == 0)) {
+ llsync_assign_ptr(tree->root, NULL);
+ return node;
+ }
+
+ shift = (height - 1) * RDXTREE_RADIX;
+
+ do {
+ if (node == NULL)
+ return NULL;
+
+ prev = node;
+ index = (unsigned int)(key >> shift) & RDXTREE_RADIX_MASK;
+ node = rdxtree_entry_addr(node->entries[index]);
+ shift -= RDXTREE_RADIX;
+ height--;
+ } while (height > 0);
+
+ if (node == NULL)
+ return NULL;
+
+ rdxtree_node_remove(prev, index);
+ rdxtree_remove_bm_set(prev, index);
+ rdxtree_cleanup(tree, prev);
+ return node;
+}
+
+/*
+ * Look up the entry at key, returning either the stored pointer or,
+ * when get_slot is nonzero, the address of its slot.
+ *
+ * All descent reads use llsync read-side accessors, so this function
+ * is usable by lockless readers. The height is derived from the root
+ * entry itself (not tree->height) to stay consistent with a
+ * concurrently published root.
+ */
+void *
+rdxtree_lookup_common(const struct rdxtree *tree, rdxtree_key_t key,
+ int get_slot)
+{
+ struct rdxtree_node *node, *prev;
+ unsigned int height, shift, index;
+ void *entry;
+
+ entry = llsync_read_ptr(tree->root);
+
+ if (entry == NULL) {
+ node = NULL;
+ height = 0;
+ } else {
+ node = rdxtree_entry_addr(entry);
+ height = rdxtree_entry_is_node(entry) ? node->height + 1 : 0;
+ }
+
+ if (key > rdxtree_max_key(height))
+ return NULL;
+
+ /* Height 0: the root slot directly stores the single entry. */
+ if (height == 0) {
+ if (node == NULL)
+ return NULL;
+
+ return get_slot ? (void *)&tree->root : node;
+ }
+
+ shift = (height - 1) * RDXTREE_RADIX;
+
+ do {
+ if (node == NULL)
+ return NULL;
+
+ prev = node;
+ index = (unsigned int)(key >> shift) & RDXTREE_RADIX_MASK;
+ entry = llsync_read_ptr(node->entries[index]);
+ node = rdxtree_entry_addr(entry);
+ shift -= RDXTREE_RADIX;
+ height--;
+ } while (height > 0);
+
+ if (node == NULL)
+ return NULL;
+
+ return get_slot ? (void *)&prev->entries[index] : node;
+}
+
+/*
+ * Replace the pointer stored in a slot (obtained from a previous
+ * lookup or insertion) and return the previous pointer.
+ *
+ * Both the old and new pointers must be non-NULL and properly
+ * aligned; the store is published with llsync_assign_ptr() for
+ * lockless readers.
+ */
+void *
+rdxtree_replace_slot(void **slot, void *ptr)
+{
+ void *old;
+
+ assert(ptr != NULL);
+ assert(rdxtree_check_alignment(ptr));
+
+ old = *slot;
+ assert(old != NULL);
+ assert(rdxtree_check_alignment(old));
+ llsync_assign_ptr(*slot, ptr);
+ return old;
+}
+
+/*
+ * Find the first pointer with a key strictly greater than iter->key,
+ * descending from the root.
+ *
+ * On success, the iterator is updated (iter->node is the leaf-level
+ * node, iter->key the found key) and the pointer is returned; NULL
+ * means the walk is complete. When a subtree turns out to be empty,
+ * the key is advanced past that subtree and the descent restarts
+ * from the root.
+ */
+static void *
+rdxtree_walk_next(struct rdxtree *tree, struct rdxtree_iter *iter)
+{
+ struct rdxtree_node *root, *node, *prev;
+ unsigned int height, shift, index, orig_index;
+ rdxtree_key_t key;
+ void *entry;
+
+ entry = llsync_read_ptr(tree->root);
+
+ if (entry == NULL)
+ return NULL;
+
+ /* Single-entry tree: the root slot is the only pointer; it is
+ returned once, then iter->key != -1 ends the walk. */
+ if (!rdxtree_entry_is_node(entry)) {
+ if (iter->key != (rdxtree_key_t)-1)
+ return NULL;
+ else {
+ iter->key = 0;
+ return rdxtree_entry_addr(entry);
+ }
+ }
+
+ key = iter->key + 1;
+
+ /* Key wrapped to 0 after a previous walk: iteration is over. */
+ if ((key == 0) && (iter->node != NULL))
+ return NULL;
+
+ root = rdxtree_entry_addr(entry);
+
+restart:
+ node = root;
+ height = root->height + 1;
+
+ if (key > rdxtree_max_key(height))
+ return NULL;
+
+ shift = (height - 1) * RDXTREE_RADIX;
+
+ do {
+ prev = node;
+ index = (key >> shift) & RDXTREE_RADIX_MASK;
+ orig_index = index;
+ node = rdxtree_node_find(node, &index);
+
+ if (node == NULL) {
+ /* Nothing left in this subtree: skip to the next
+ sibling subtree and restart from the root. */
+ shift += RDXTREE_RADIX;
+ key = ((key >> shift) + 1) << shift;
+
+ if (key == 0)
+ return NULL;
+
+ goto restart;
+ }
+
+ /* The entry found is past the requested slot: realign the
+ key on the slot actually taken. */
+ if (orig_index != index)
+ key = ((key >> shift) + (index - orig_index)) << shift;
+
+ shift -= RDXTREE_RADIX;
+ height--;
+ } while (height > 0);
+
+ iter->node = prev;
+ iter->key = key;
+ return node;
+}
+
+/*
+ * Return the next pointer of a tree traversal, or NULL when done.
+ *
+ * Fast path: scan the remainder of the cached leaf node; fall back
+ * to a full descent (rdxtree_walk_next()) when the node is exhausted
+ * or no node is cached yet.
+ */
+void *
+rdxtree_walk(struct rdxtree *tree, struct rdxtree_iter *iter)
+{
+ unsigned int index, orig_index;
+ void *ptr;
+
+ if (iter->node == NULL)
+ return rdxtree_walk_next(tree, iter);
+
+ index = (iter->key + 1) & RDXTREE_RADIX_MASK;
+
+ /* index == 0 means the next key starts in another node. */
+ if (index != 0) {
+ orig_index = index;
+ ptr = rdxtree_node_find(iter->node, &index);
+
+ if (ptr != NULL) {
+ iter->key += (index - orig_index) + 1;
+ return ptr;
+ }
+ }
+
+ return rdxtree_walk_next(tree, iter);
+}
+
+/*
+ * Remove every pointer from the tree and release all nodes.
+ *
+ * Works by repeatedly walking to the first remaining leaf node,
+ * detaching it from its parent, and scheduling its destruction,
+ * until the walk finds nothing. The stored pointers themselves are
+ * not freed (see the header comment on this function).
+ */
+void
+rdxtree_remove_all(struct rdxtree *tree)
+{
+ struct rdxtree_node *node, *parent;
+ struct rdxtree_iter iter;
+
+ /* Height 0: at most a single pointer in the root slot. */
+ if (tree->height == 0) {
+ if (tree->root != NULL)
+ llsync_assign_ptr(tree->root, NULL);
+
+ return;
+ }
+
+ for (;;) {
+ rdxtree_iter_init(&iter);
+ rdxtree_walk_next(tree, &iter);
+
+ if (iter.node == NULL)
+ break;
+
+ node = iter.node;
+ parent = node->parent;
+
+ if (parent == NULL)
+ rdxtree_init(tree);
+ else {
+ rdxtree_node_remove(parent, node->index);
+ rdxtree_remove_bm_set(parent, node->index);
+ rdxtree_cleanup(tree, parent);
+ node->parent = NULL;
+ }
+
+ rdxtree_node_schedule_destruction(node);
+ }
+}
diff --git a/kern/rdxtree.h b/kern/rdxtree.h
new file mode 100644
index 00000000..1f8456e0
--- /dev/null
+++ b/kern/rdxtree.h
@@ -0,0 +1,209 @@
+/*
+ * Copyright (c) 2011-2015 Richard Braun.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * Radix tree.
+ *
+ * In addition to the standard insertion operation, this implementation
+ * can allocate keys for the caller at insertion time.
+ *
+ * Upstream site with license notes :
+ * http://git.sceen.net/rbraun/librbraun.git/
+ */
+
+#ifndef _RDXTREE_H
+#define _RDXTREE_H
+
+#include <stddef.h>
+#include <sys/types.h>
+
+/*
+ * Initialize the node cache.
+ */
+void rdxtree_cache_init(void);
+
+/*
+ * This macro selects between 32 or 64-bits (the default) keys.
+ */
+#if 0
+#define RDXTREE_KEY_32
+#endif
+
+#ifdef RDXTREE_KEY_32
+typedef uint32_t rdxtree_key_t;
+#else /* RDXTREE_KEY_32 */
+typedef uint64_t rdxtree_key_t;
+#endif /* RDXTREE_KEY_32 */
+
+/*
+ * Radix tree.
+ */
+struct rdxtree;
+
+/*
+ * Radix tree iterator.
+ */
+struct rdxtree_iter;
+
+/*
+ * Static tree initializer.
+ */
+#define RDXTREE_INITIALIZER { 0, NULL }
+
+#include "rdxtree_i.h"
+
+/*
+ * Initialize a tree to the empty state.
+ *
+ * Must not be applied to a tree that still owns nodes or entries;
+ * it does not release anything (see rdxtree_remove_all()).
+ */
+static inline void
+rdxtree_init(struct rdxtree *tree)
+{
+ tree->height = 0;
+ tree->root = NULL;
+}
+
+/*
+ * Insert a pointer in a tree.
+ *
+ * The ptr parameter must not be NULL.
+ *
+ * Returns ERR_SUCCESS on success, ERR_BUSY if the key is already
+ * in use, or a memory allocation error.
+ */
+static inline int
+rdxtree_insert(struct rdxtree *tree, rdxtree_key_t key, void *ptr)
+{
+ return rdxtree_insert_common(tree, key, ptr, NULL);
+}
+
+/*
+ * Insert a pointer in a tree and obtain its slot.
+ *
+ * The ptr and slotp parameters must not be NULL. If successful, the slot of
+ * the newly inserted pointer is stored at the address pointed to by the slotp
+ * parameter.
+ *
+ * Returns ERR_SUCCESS on success, ERR_BUSY if the key is already
+ * in use, or a memory allocation error.
+ */
+static inline int
+rdxtree_insert_slot(struct rdxtree *tree, rdxtree_key_t key,
+ void *ptr, void ***slotp)
+{
+ return rdxtree_insert_common(tree, key, ptr, slotp);
+}
+
+/*
+ * Insert a pointer in a tree, for which a new key is allocated.
+ *
+ * The ptr and keyp parameters must not be NULL. The newly allocated key is
+ * stored at the address pointed to by the keyp parameter.
+ *
+ * The lowest free key is chosen. Returns ERR_SUCCESS on success or
+ * a memory allocation error.
+ */
+static inline int
+rdxtree_insert_alloc(struct rdxtree *tree, void *ptr, rdxtree_key_t *keyp)
+{
+ return rdxtree_insert_alloc_common(tree, ptr, keyp, NULL);
+}
+
+/*
+ * Insert a pointer in a tree, for which a new key is allocated, and obtain
+ * its slot.
+ *
+ * The ptr, keyp and slotp parameters must not be NULL. The newly allocated
+ * key is stored at the address pointed to by the keyp parameter while the
+ * slot of the inserted pointer is stored at the address pointed to by the
+ * slotp parameter.
+ *
+ * The lowest free key is chosen. Returns ERR_SUCCESS on success or
+ * a memory allocation error.
+ */
+static inline int
+rdxtree_insert_alloc_slot(struct rdxtree *tree, void *ptr,
+ rdxtree_key_t *keyp, void ***slotp)
+{
+ return rdxtree_insert_alloc_common(tree, ptr, keyp, slotp);
+}
+
+/*
+ * Remove a pointer from a tree.
+ *
+ * The matching pointer is returned if successful, NULL otherwise.
+ */
+void * rdxtree_remove(struct rdxtree *tree, rdxtree_key_t key);
+
+/*
+ * Look up a pointer in a tree.
+ *
+ * The matching pointer is returned if successful, NULL otherwise.
+ *
+ * Reads go through the llsync read-side accessors, so this may be
+ * used by lockless readers.
+ */
+static inline void *
+rdxtree_lookup(const struct rdxtree *tree, rdxtree_key_t key)
+{
+ return rdxtree_lookup_common(tree, key, 0);
+}
+
+/*
+ * Look up a slot in a tree.
+ *
+ * A slot is a pointer to a stored pointer in a tree. It can be used as
+ * a placeholder for fast replacements to avoid multiple lookups on the same
+ * key.
+ *
+ * A slot for the matching pointer is returned if successful, NULL otherwise.
+ *
+ * The slot remains valid only as long as the entry is not removed and
+ * the tree structure around it is not changed.
+ *
+ * See rdxtree_replace_slot().
+ */
+static inline void **
+rdxtree_lookup_slot(const struct rdxtree *tree, rdxtree_key_t key)
+{
+ return rdxtree_lookup_common(tree, key, 1);
+}
+
+/*
+ * Replace a pointer in a tree.
+ *
+ * The ptr parameter must not be NULL. The previous pointer is returned.
+ *
+ * See rdxtree_lookup_slot().
+ */
+void * rdxtree_replace_slot(void **slot, void *ptr);
+
+/*
+ * Forge a loop to process all pointers of a tree.
+ */
+#define rdxtree_for_each(tree, iter, ptr) \
+for (rdxtree_iter_init(iter), ptr = rdxtree_walk(tree, iter); \
+ ptr != NULL; \
+ ptr = rdxtree_walk(tree, iter))
+
+/*
+ * Return the key of the current pointer from an iterator.
+ *
+ * Only meaningful after rdxtree_walk() has returned a non-NULL
+ * pointer for this iterator.
+ */
+static inline rdxtree_key_t
+rdxtree_iter_key(const struct rdxtree_iter *iter)
+{
+ return iter->key;
+}
+
+/*
+ * Remove all pointers from a tree.
+ *
+ * The common way to destroy a tree and its pointers is to loop over all
+ * the pointers using rdxtree_for_each(), freeing them, then call this
+ * function.
+ */
+void rdxtree_remove_all(struct rdxtree *tree);
+
+#endif /* _RDXTREE_H */
diff --git a/kern/rdxtree_i.h b/kern/rdxtree_i.h
new file mode 100644
index 00000000..1bd1f64a
--- /dev/null
+++ b/kern/rdxtree_i.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2013-2015 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 3 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * Upstream site with license notes :
+ * http://git.sceen.net/rbraun/librbraun.git/
+ */
+
+#ifndef _RDXTREE_I_H
+#define _RDXTREE_I_H
+
+/*
+ * Radix tree.
+ */
+struct rdxtree {
+ unsigned int height;
+ void *root;
+};
+
+/*
+ * Radix tree iterator.
+ *
+ * The node member refers to the node containing the current pointer, if any.
+ * The key member refers to the current pointer, and is valid if and only if
+ * rdxtree_walk() has been called at least once on the iterator.
+ */
+struct rdxtree_iter {
+ void *node;
+ rdxtree_key_t key;
+};
+
+/*
+ * Initialize an iterator.
+ *
+ * The key is set to (rdxtree_key_t)-1 as a "before the first entry"
+ * sentinel; the first rdxtree_walk() call then starts at key 0.
+ */
+static inline void
+rdxtree_iter_init(struct rdxtree_iter *iter)
+{
+ iter->node = NULL;
+ iter->key = (rdxtree_key_t)-1;
+}
+
+int rdxtree_insert_common(struct rdxtree *tree, rdxtree_key_t key,
+ void *ptr, void ***slotp);
+
+int rdxtree_insert_alloc_common(struct rdxtree *tree, void *ptr,
+ rdxtree_key_t *keyp, void ***slotp);
+
+void * rdxtree_lookup_common(const struct rdxtree *tree, rdxtree_key_t key,
+ int get_slot);
+
+void * rdxtree_walk(struct rdxtree *tree, struct rdxtree_iter *iter);
+
+#endif /* _RDXTREE_I_H */
diff --git a/kern/refcount.h b/kern/refcount.h
index 7fd6cdfb..f32feb87 100644
--- a/kern/refcount.h
+++ b/kern/refcount.h
@@ -27,9 +27,7 @@
#ifndef _KERN_REFCOUNT_H_
#define _KERN_REFCOUNT_H_
-#include <kern/macro_help.h>
-
-#include "refcount.h" /*XXX*/
+#include <kern/macros.h>
/* Unless the above include file specified otherwise,
use the system-independent (unoptimized) atomic reference counter. */
@@ -65,6 +63,6 @@ typedef struct RefCount RefCount;
if (new_value == 0) { func; } \
MACRO_END
-#endif
+#endif /* MACHINE_REFCOUNT */
-#endif _KERN_REFCOUNT_H_
+#endif /* _KERN_REFCOUNT_H_ */
diff --git a/kern/sched.h b/kern/sched.h
index 942dd80f..f82f9f56 100644
--- a/kern/sched.h
+++ b/kern/sched.h
@@ -38,7 +38,7 @@
#include <kern/queue.h>
#include <kern/lock.h>
#include <kern/kern_types.h>
-#include <kern/macro_help.h>
+#include <kern/macros.h>
#if MACH_FIXPRI
#include <mach/policy.h>
@@ -153,7 +153,7 @@ extern unsigned sched_tick;
#define thread_timer_delta(thread) \
MACRO_BEGIN \
- register unsigned delta; \
+ unsigned delta; \
\
delta = 0; \
TIMER_DELTA((thread)->system_timer, \
diff --git a/kern/sched_prim.c b/kern/sched_prim.c
index 46b5df43..0cef1601 100644
--- a/kern/sched_prim.c
+++ b/kern/sched_prim.c
@@ -44,7 +44,7 @@
#include <kern/lock.h>
#include <kern/mach_clock.h>
#include <kern/mach_factor.h>
-#include <kern/macro_help.h>
+#include <kern/macros.h>
#include <kern/processor.h>
#include <kern/queue.h>
#include <kern/sched.h>
@@ -70,18 +70,8 @@ int sched_usec;
thread_t sched_thread_id;
-void set_pri(thread_t, int, boolean_t);
-void do_thread_scan(void);
-
-thread_t choose_pset_thread();
-
timer_elt_data_t recompute_priorities_timer;
-#if DEBUG
-void checkrq(run_queue_t, char *);
-void thread_check(thread_t, run_queue_t);
-#endif
-
/*
* State machine
*
@@ -150,7 +140,7 @@ decl_simple_lock_data(, wait_lock[NUMQUEUES])
void wait_queue_init(void)
{
- register int i;
+ int i;
for (i = 0; i < NUMQUEUES; i++) {
queue_init(&wait_queue[i]);
@@ -165,7 +155,7 @@ void sched_init(void)
min_quantum = hz / 10; /* context switch 10 times/second */
wait_queue_init();
- pset_sys_bootstrap(); /* initialize processer mgmt. */
+ pset_sys_bootstrap(); /* initialize processor mgmt. */
queue_init(&action_queue);
simple_lock_init(&action_lock);
sched_tick = 0;
@@ -199,8 +189,8 @@ void thread_timeout(
void thread_set_timeout(
int t) /* timeout interval in ticks */
{
- register thread_t thread = current_thread();
- register spl_t s;
+ thread_t thread = current_thread();
+ spl_t s;
s = splsched();
thread_lock(thread);
@@ -215,7 +205,7 @@ void thread_set_timeout(
* Set up thread timeout element when thread is created.
*/
void thread_timeout_setup(
- register thread_t thread)
+ thread_t thread)
{
thread->timer.fcn = thread_timeout;
thread->timer.param = thread;
@@ -233,12 +223,10 @@ void assert_wait(
event_t event,
boolean_t interruptible)
{
- register queue_t q;
- register int index;
- register thread_t thread;
-#if MACH_SLOCKS
- register simple_lock_t lock;
-#endif /* MACH_SLOCKS */
+ queue_t q;
+ int index;
+ thread_t thread;
+ decl_simple_lock_data( , *lock);
spl_t s;
thread = current_thread();
@@ -250,12 +238,10 @@ void assert_wait(
if (event != 0) {
index = wait_hash(event);
q = &wait_queue[index];
-#if MACH_SLOCKS
lock = &wait_lock[index];
-#endif /* MACH_SLOCKS */
simple_lock(lock);
thread_lock(thread);
- enqueue_tail(q, (queue_entry_t) thread);
+ enqueue_tail(q, &(thread->links));
thread->wait_event = event;
if (interruptible)
thread->state |= TH_WAIT;
@@ -288,16 +274,14 @@ void assert_wait(
* interruptible.
*/
void clear_wait(
- register thread_t thread,
+ thread_t thread,
int result,
boolean_t interrupt_only)
{
- register int index;
- register queue_t q;
-#if MACH_SLOCKS
- register simple_lock_t lock;
-#endif /* MACH_SLOCKS */
- register event_t event;
+ int index;
+ queue_t q;
+ decl_simple_lock_data( , *lock);
+ event_t event;
spl_t s;
s = splsched();
@@ -316,9 +300,7 @@ void clear_wait(
thread_unlock(thread);
index = wait_hash(event);
q = &wait_queue[index];
-#if MACH_SLOCKS
lock = &wait_lock[index];
-#endif /* MACH_SLOCKS */
simple_lock(lock);
/*
* If the thread is still waiting on that event,
@@ -335,7 +317,7 @@ void clear_wait(
simple_unlock(lock);
}
if (event == 0) {
- register int state = thread->state;
+ int state = thread->state;
reset_timeout_check(&thread->timer);
@@ -376,7 +358,7 @@ void clear_wait(
}
static inline void __attribute__((noreturn))
-state_panic(thread_t thread, const char *caller)
+state_panic(const thread_t thread, const char *caller)
{
panic ("%s: thread %x has unexpected state %x",
caller, thread, thread->state);
@@ -394,21 +376,17 @@ void thread_wakeup_prim(
boolean_t one_thread,
int result)
{
- register queue_t q;
- register int index;
- register thread_t thread, next_th;
-#if MACH_SLOCKS
- register simple_lock_t lock;
-#endif /* MACH_SLOCKS */
+ queue_t q;
+ int index;
+ thread_t thread, next_th;
+ decl_simple_lock_data( , *lock);
spl_t s;
- register int state;
+ int state;
index = wait_hash(event);
q = &wait_queue[index];
s = splsched();
-#if MACH_SLOCKS
lock = &wait_lock[index];
-#endif /* MACH_SLOCKS */
simple_lock(lock);
thread = (thread_t) queue_first(q);
while (!queue_end(q, (queue_entry_t)thread)) {
@@ -468,6 +446,9 @@ void thread_wakeup_prim(
* occurs. The specified lock is unlocked before releasing
* the cpu. (This is a convenient way to sleep without manually
* calling assert_wait).
+ *
+ * Note: if the event may be woken from an interrupt handler, this must be
+ * called at an spl level that prevents such interrupts.
*/
void thread_sleep(
event_t event,
@@ -476,7 +457,7 @@ void thread_sleep(
{
assert_wait(event, interruptible); /* assert event */
simple_unlock(lock); /* release the lock */
- thread_block((void (*)()) 0); /* block ourselves */
+ thread_block(thread_no_continuation); /* block ourselves */
}
/*
@@ -490,8 +471,8 @@ void thread_sleep(
* xxx - DO NOT export this to users.
*/
void thread_bind(
- register thread_t thread,
- processor_t processor)
+ thread_t thread,
+ processor_t processor)
{
spl_t s;
@@ -509,9 +490,9 @@ void thread_bind(
*/
thread_t thread_select(
- register processor_t myprocessor)
+ processor_t myprocessor)
{
- register thread_t thread;
+ thread_t thread;
myprocessor->first_quantum = TRUE;
/*
@@ -523,7 +504,7 @@ thread_t thread_select(
myprocessor->quantum = min_quantum;
}
else {
- register processor_set_t pset;
+ processor_set_t pset;
#if MACH_HOST
pset = myprocessor->processor_set;
@@ -559,7 +540,7 @@ thread_t thread_select(
}
}
else {
- register queue_t q;
+ queue_t q;
/*
* If there is a thread at hint, grab it,
@@ -622,9 +603,9 @@ thread_t thread_select(
*/
boolean_t thread_invoke(
- register thread_t old_thread,
- continuation_t continuation,
- register thread_t new_thread)
+ thread_t old_thread,
+ continuation_t continuation,
+ thread_t new_thread)
{
/*
* Check for invoking the same thread.
@@ -637,9 +618,9 @@ boolean_t thread_invoke(
thread_lock(new_thread);
new_thread->state &= ~TH_UNINT;
thread_unlock(new_thread);
- thread_wakeup(&new_thread->state);
+ thread_wakeup(TH_EV_STATE(new_thread));
- if (continuation != (void (*)()) 0) {
+ if (continuation != thread_no_continuation) {
(void) spl0();
call_continuation(continuation);
/*NOTREACHED*/
@@ -652,14 +633,14 @@ boolean_t thread_invoke(
*/
thread_lock(new_thread);
if ((old_thread->stack_privilege != current_stack()) &&
- (continuation != (void (*)()) 0))
+ (continuation != thread_no_continuation))
{
switch (new_thread->state & TH_SWAP_STATE) {
case TH_SWAPPED:
new_thread->state &= ~(TH_SWAPPED | TH_UNINT);
thread_unlock(new_thread);
- thread_wakeup(&new_thread->state);
+ thread_wakeup(TH_EV_STATE(new_thread));
#if NCPUS > 1
new_thread->last_processor = current_processor();
@@ -698,7 +679,7 @@ boolean_t thread_invoke(
if (old_thread->wake_active) {
old_thread->wake_active = FALSE;
thread_unlock(old_thread);
- thread_wakeup((event_t)&old_thread->wake_active);
+ thread_wakeup(TH_EV_WAKE_ACTIVE(old_thread));
goto after_old_thread;
}
@@ -748,7 +729,7 @@ boolean_t thread_invoke(
* running out of stack.
*/
- counter_always(c_thread_invoke_hits++);
+ counter(c_thread_invoke_hits++);
(void) spl0();
call_continuation(new_thread->swap_func);
/*NOTREACHED*/
@@ -760,7 +741,7 @@ boolean_t thread_invoke(
*/
thread_swapin(new_thread);
thread_unlock(new_thread);
- counter_always(c_thread_invoke_misses++);
+ counter(c_thread_invoke_misses++);
return FALSE;
case 0:
@@ -781,7 +762,7 @@ boolean_t thread_invoke(
{
thread_swapin(new_thread);
thread_unlock(new_thread);
- counter_always(c_thread_invoke_misses++);
+ counter(c_thread_invoke_misses++);
return FALSE;
}
}
@@ -789,7 +770,7 @@ boolean_t thread_invoke(
new_thread->state &= ~(TH_SWAPPED | TH_UNINT);
thread_unlock(new_thread);
- thread_wakeup(&new_thread->state);
+ thread_wakeup(TH_EV_STATE(new_thread));
/*
* Thread is now interruptible.
@@ -810,7 +791,7 @@ boolean_t thread_invoke(
* changing address spaces. It updates active_threads.
* It returns only if a continuation is not supplied.
*/
- counter_always(c_thread_invoke_csw++);
+ counter(c_thread_invoke_csw++);
old_thread = switch_context(old_thread, continuation, new_thread);
/*
@@ -829,9 +810,9 @@ boolean_t thread_invoke(
* Called at splsched.
*/
void thread_continue(
- register thread_t old_thread)
+ thread_t old_thread)
{
- register continuation_t continuation = current_thread()->swap_func;
+ continuation_t continuation = current_thread()->swap_func;
/*
* We must dispatch the old thread and then
@@ -865,9 +846,9 @@ void thread_continue(
void thread_block(
continuation_t continuation)
{
- register thread_t thread = current_thread();
- register processor_t myprocessor = cpu_to_processor(cpu_number());
- register thread_t new_thread;
+ thread_t thread = current_thread();
+ processor_t myprocessor = cpu_to_processor(cpu_number());
+ thread_t new_thread;
spl_t s;
check_simple_locks();
@@ -906,10 +887,10 @@ void thread_block(
*/
void thread_run(
continuation_t continuation,
- register thread_t new_thread)
+ thread_t new_thread)
{
- register thread_t thread = current_thread();
- register processor_t myprocessor = cpu_to_processor(cpu_number());
+ thread_t thread = current_thread();
+ processor_t myprocessor = cpu_to_processor(cpu_number());
spl_t s;
check_simple_locks();
@@ -928,7 +909,7 @@ void thread_run(
*/
void thread_dispatch(
- register thread_t thread)
+ thread_t thread)
{
/*
* If we are discarding the thread's stack, we must do it
@@ -937,7 +918,7 @@ void thread_dispatch(
thread_lock(thread);
- if (thread->swap_func != (void (*)()) 0) {
+ if (thread->swap_func != thread_no_continuation) {
assert((thread->state & TH_SWAP_STATE) == 0);
thread->state |= TH_SWAPPED;
stack_free(thread);
@@ -954,7 +935,7 @@ void thread_dispatch(
if (thread->wake_active) {
thread->wake_active = FALSE;
thread_unlock(thread);
- thread_wakeup((event_t)&thread->wake_active);
+ thread_wakeup(TH_EV_WAKE_ACTIVE(thread));
return;
}
break;
@@ -1053,10 +1034,10 @@ shift_data_t wait_shift[32] = {
*/
void compute_priority(
- register thread_t thread,
+ thread_t thread,
boolean_t resched)
{
- register int pri;
+ int pri;
#if MACH_FIXPRI
if (thread->policy == POLICY_TIMESHARE) {
@@ -1085,9 +1066,9 @@ void compute_priority(
*/
void compute_my_priority(
- register thread_t thread)
+ thread_t thread)
{
- register int temp_pri;
+ int temp_pri;
do_priority_computation(thread,temp_pri);
thread->sched_pri = temp_pri;
@@ -1132,11 +1113,11 @@ void recompute_priorities(void *param)
* can only be called by the thread on itself.
*/
void update_priority(
- register thread_t thread)
+ thread_t thread)
{
- register unsigned int ticks;
- register shift_t shiftp;
- register int temp_pri;
+ unsigned int ticks;
+ shift_t shiftp;
+ int temp_pri;
ticks = sched_tick - thread->sched_stamp;
@@ -1196,7 +1177,7 @@ void update_priority(
#if DEBUG
#define run_queue_enqueue(rq, th) \
MACRO_BEGIN \
- register unsigned int whichq; \
+ unsigned int whichq; \
\
whichq = (th)->sched_pri; \
if (whichq >= NRQS) { \
@@ -1206,7 +1187,7 @@ void update_priority(
\
simple_lock(&(rq)->lock); /* lock the run queue */ \
checkrq((rq), "thread_setrun: before adding thread"); \
- enqueue_tail(&(rq)->runq[whichq], (queue_entry_t) (th)); \
+ enqueue_tail(&(rq)->runq[whichq], &((th)->links)); \
\
if (whichq < (rq)->low || (rq)->count == 0) \
(rq)->low = whichq; /* minimize */ \
@@ -1220,7 +1201,7 @@ void update_priority(
#else /* DEBUG */
#define run_queue_enqueue(rq, th) \
MACRO_BEGIN \
- register unsigned int whichq; \
+ unsigned int whichq; \
\
whichq = (th)->sched_pri; \
if (whichq >= NRQS) { \
@@ -1229,7 +1210,7 @@ void update_priority(
} \
\
simple_lock(&(rq)->lock); /* lock the run queue */ \
- enqueue_tail(&(rq)->runq[whichq], (queue_entry_t) (th)); \
+ enqueue_tail(&(rq)->runq[whichq], &((th)->links)); \
\
if (whichq < (rq)->low || (rq)->count == 0) \
(rq)->low = whichq; /* minimize */ \
@@ -1249,13 +1230,13 @@ void update_priority(
*/
void thread_setrun(
- register thread_t th,
+ thread_t th,
boolean_t may_preempt)
{
- register processor_t processor;
- register run_queue_t rq;
+ processor_t processor;
+ run_queue_t rq;
#if NCPUS > 1
- register processor_set_t pset;
+ processor_set_t pset;
#endif /* NCPUS > 1 */
/*
@@ -1423,7 +1404,7 @@ void set_pri(
int pri,
boolean_t resched)
{
- register struct run_queue *rq;
+ struct run_queue *rq;
rq = rem_runq(th);
th->sched_pri = pri;
@@ -1448,7 +1429,7 @@ void set_pri(
struct run_queue *rem_runq(
thread_t th)
{
- register struct run_queue *rq;
+ struct run_queue *rq;
rq = th->runq;
/*
@@ -1514,10 +1495,10 @@ thread_t choose_thread(
processor_t myprocessor)
{
thread_t th;
- register queue_t q;
- register run_queue_t runq;
- register int i;
- register processor_set_t pset;
+ queue_t q;
+ run_queue_t runq;
+ int i;
+ processor_set_t pset;
runq = &myprocessor->runq;
@@ -1558,13 +1539,13 @@ thread_t choose_thread(
*/
thread_t choose_pset_thread(
- register processor_t myprocessor,
+ processor_t myprocessor,
processor_set_t pset)
{
- register run_queue_t runq;
- register thread_t th;
- register queue_t q;
- register int i;
+ run_queue_t runq;
+ thread_t th;
+ queue_t q;
+ int i;
runq = &pset->runq;
@@ -1640,14 +1621,14 @@ int no_dispatch_count = 0;
* to execute.
*/
-void idle_thread_continue(void)
+void __attribute__((noreturn)) idle_thread_continue(void)
{
- register processor_t myprocessor;
- register volatile thread_t *threadp;
- register volatile int *gcount;
- register volatile int *lcount;
- register thread_t new_thread;
- register int state;
+ processor_t myprocessor;
+ volatile thread_t *threadp;
+ volatile int *gcount;
+ volatile int *lcount;
+ thread_t new_thread;
+ int state;
int mycpu;
spl_t s;
@@ -1746,7 +1727,7 @@ retry:
thread_run(idle_thread_continue, new_thread);
}
else if (state == PROCESSOR_IDLE) {
- register processor_set_t pset;
+ processor_set_t pset;
pset = myprocessor->processor_set;
simple_lock(&pset->idle_lock);
@@ -1797,7 +1778,7 @@ retry:
void idle_thread(void)
{
- register thread_t self = current_thread();
+ thread_t self = current_thread();
spl_t s;
stack_privilege(self);
@@ -1900,10 +1881,10 @@ boolean_t
do_runq_scan(
run_queue_t runq)
{
- register spl_t s;
- register queue_t q;
- register thread_t thread;
- register int count;
+ spl_t s;
+ queue_t q;
+ thread_t thread;
+ int count;
s = splsched();
simple_lock(&runq->lock);
@@ -1964,11 +1945,11 @@ if (do_thread_scan_debug)
void do_thread_scan(void)
{
- register spl_t s;
- register boolean_t restart_needed = 0;
- register thread_t thread;
+ spl_t s;
+ boolean_t restart_needed = 0;
+ thread_t thread;
#if MACH_HOST
- register processor_set_t pset;
+ processor_set_t pset;
#endif /* MACH_HOST */
do {
@@ -2012,12 +1993,12 @@ void do_thread_scan(void)
#if DEBUG
void checkrq(
run_queue_t rq,
- char *msg)
+ const char *msg)
{
- register queue_t q1;
- register int i, j;
- register queue_entry_t e;
- register int low;
+ queue_t q1;
+ int i, j;
+ queue_entry_t e;
+ int low;
low = -1;
j = 0;
@@ -2048,10 +2029,10 @@ void checkrq(
}
void thread_check(
- register thread_t th,
- register run_queue_t rq)
+ thread_t th,
+ run_queue_t rq)
{
- register unsigned int whichq;
+ unsigned int whichq;
whichq = th->sched_pri;
if (whichq >= NRQS) {
diff --git a/kern/sched_prim.h b/kern/sched_prim.h
index 5311d160..dfb2f54b 100644
--- a/kern/sched_prim.h
+++ b/kern/sched_prim.h
@@ -52,6 +52,8 @@ typedef void *event_t; /* wait event */
typedef void (*continuation_t)(void); /* continuation */
+#define thread_no_continuation ((continuation_t) 0) /* no continuation */
+
/*
* Exported interface to sched_prim.c.
*/
@@ -69,7 +71,7 @@ extern void thread_sleep(
event_t event,
simple_lock_t lock,
boolean_t interruptible);
-extern void thread_wakeup(); /* for function pointers */
+extern void thread_wakeup(void); /* for function pointers */
extern void thread_wakeup_prim(
event_t event,
boolean_t one_thread,
@@ -103,7 +105,7 @@ extern boolean_t thread_handoff(
thread_t old_thread,
continuation_t continuation,
thread_t new_thread);
-extern void recompute_priorities();
+extern void recompute_priorities(void *param);
extern void update_priority(
thread_t thread);
extern void compute_my_priority(
@@ -115,7 +117,7 @@ extern void compute_priority(
thread_t thread,
boolean_t resched);
extern void thread_timeout_setup(
- register thread_t thread);
+ thread_t thread);
/*
* Routines defined as macros
@@ -132,13 +134,10 @@ extern void thread_timeout_setup(
* Machine-dependent code must define these functions.
*/
-extern void thread_bootstrap_return(void);
-extern void thread_exception_return(void);
-#ifdef __GNUC__
+extern void thread_bootstrap_return(void) __attribute__((noreturn));
+extern void thread_exception_return(void) __attribute__((noreturn));
extern void __attribute__((__noreturn__)) thread_syscall_return(kern_return_t);
-#else
-extern void thread_syscall_return(kern_return_t);
-#endif
+
extern thread_t switch_context(
thread_t old_thread,
continuation_t continuation,
@@ -153,7 +152,7 @@ extern void stack_handoff(
* or are defined directly by machine-dependent code.
*/
-extern void stack_alloc(
+extern kern_return_t stack_alloc(
thread_t thread,
void (*resume)(thread_t));
extern boolean_t stack_alloc_try(
@@ -172,4 +171,18 @@ extern void stack_free(
#define convert_ipc_timeout_to_ticks(millis) \
(((millis) * hz + 999) / 1000)
+void set_pri(thread_t th, int pri, boolean_t resched);
+void do_thread_scan(void);
+thread_t choose_pset_thread(processor_t myprocessor, processor_set_t pset);
+
+#if DEBUG
+#include <kern/sched.h> /* for run_queue_t */
+
+void checkrq(run_queue_t rq, const char *msg);
+void thread_check(thread_t th, run_queue_t rq);
+#endif /* DEBUG */
+
+extern void idle_thread(void) __attribute__((noreturn));
+extern void sched_thread(void);
+
#endif /* _KERN_SCHED_PRIM_H_ */
diff --git a/kern/server_loop.ch b/kern/server_loop.ch
deleted file mode 100644
index 409e013d..00000000
--- a/kern/server_loop.ch
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Mach Operating System
- * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
- * All Rights Reserved.
- *
- * Permission to use, copy, modify and distribute this software and its
- * documentation is hereby granted, provided that both the copyright
- * notice and this permission notice appear in all copies of the
- * software, derivative works or modified versions, and any portions
- * thereof, and that both notices appear in supporting documentation.
- *
- * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
- * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
- * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
- * Carnegie Mellon requests users of this software to return to
- *
- * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
- * School of Computer Science
- * Carnegie Mellon University
- * Pittsburgh PA 15213-3890
- *
- * any improvements or extensions that they make and grant Carnegie Mellon
- * the rights to redistribute these changes.
- */
-/*
- * File: kern/server_loop.c
- *
- * A common server loop for builtin tasks.
- */
-
-/*
- * Must define symbols for:
- * SERVER_NAME String name of this module
- * SERVER_LOOP Routine name for the loop
- * SERVER_DISPATCH MiG function(s) to handle message
- *
- * Must redefine symbols for pager_server functions.
- */
-
-#include <kern/debug.h>
-#include <kern/kalloc.h>
-#include <mach/port.h>
-#include <mach/message.h>
-#include <vm/vm_kern.h> /* for kernel_map */
-
-void SERVER_LOOP(rcv_set, max_size)
-{
- register mach_msg_header_t *in_msg;
- register mach_msg_header_t *out_msg;
- register mach_msg_header_t *tmp_msg;
- vm_offset_t messages;
- mach_msg_return_t r;
-
- /*
- * Allocate our message buffers.
- */
-
- messages = kalloc(2 * max_size);
- if (messages == 0)
- panic(SERVER_NAME);
- in_msg = (mach_msg_header_t *) messages;
- out_msg = (mach_msg_header_t *) (messages + max_size);
-
- /*
- * Service loop... receive messages and process them.
- */
-
- for (;;) {
- /* receive first message */
-
- receive_msg:
- r = mach_msg(in_msg, MACH_RCV_MSG, 0, max_size, rcv_set,
- MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
- if (r == MACH_MSG_SUCCESS)
- break;
-
- printf("%s: receive failed, 0x%x.\n", SERVER_NAME, r);
- }
-
- for (;;) {
- /* process request message */
-
- (void) SERVER_DISPATCH(in_msg, out_msg);
-
- /* send reply and receive next request */
-
- if (out_msg->msgh_remote_port == MACH_PORT_NULL)
- goto receive_msg;
-
- r = mach_msg(out_msg, MACH_SEND_MSG|MACH_RCV_MSG,
- out_msg->msgh_size, max_size, rcv_set,
- MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
- if (r != MACH_MSG_SUCCESS) {
- printf("%s: send/receive failed, 0x%x.\n",
- SERVER_NAME, r);
- goto receive_msg;
- }
-
- /* swap message buffers */
-
- tmp_msg = in_msg; in_msg = out_msg; out_msg = tmp_msg;
- }
-}
diff --git a/kern/shuttle.h b/kern/shuttle.h
index e8e574b6..0b1c2c5e 100644
--- a/kern/shuttle.h
+++ b/kern/shuttle.h
@@ -68,4 +68,4 @@ typedef struct Shuttle Shuttle;
-#endif _KERN_SHUTTLE_H_
+#endif /* _KERN_SHUTTLE_H_ */
diff --git a/kern/slab.c b/kern/slab.c
index 47c2c8f9..eeb94f85 100644
--- a/kern/slab.c
+++ b/kern/slab.c
@@ -58,15 +58,10 @@
* over a hash table. Unlike a hash table, a BST provides a "lookup nearest"
* operation, so obtaining the slab data (whether it is embedded in the slab or
* off slab) from a buffer address simply consists of a "lookup nearest towards
- * 0" tree search. Storing slabs instead of buffers also considerably reduces
- * the number of elements to retain. Finally, a self-balancing tree is a true
- * self-scaling data structure, whereas a hash table requires periodic
- * maintenance and complete resizing, which is expensive. The only drawback is
- * that releasing a buffer to the slab layer takes logarithmic time instead of
- * constant time. But as the data set size is kept reasonable (because slabs
- * are stored instead of buffers) and because the CPU pool layer services most
- * requests, avoiding many accesses to the slab layer, it is considered an
- * acceptable tradeoff.
+ * 0" tree search. Finally, a self-balancing tree is a true self-scaling data
+ * structure, whereas a hash table requires periodic maintenance and complete
+ * resizing, which is expensive. The only drawback is that releasing a buffer
+ * to the slab layer takes logarithmic time instead of constant time.
*
* This implementation uses per-cpu pools of objects, which service most
* allocation requests. These pools act as caches (but are named differently
@@ -79,6 +74,7 @@
#include <string.h>
#include <kern/assert.h>
#include <kern/mach_clock.h>
+#include <kern/macros.h>
#include <kern/printf.h>
#include <kern/slab.h>
#include <kern/kalloc.h>
@@ -86,6 +82,7 @@
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <vm/vm_kern.h>
+#include <vm/vm_page.h>
#include <vm/vm_types.h>
#include <sys/types.h>
@@ -96,7 +93,6 @@
/*
* Utility macros.
*/
-#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
#define P2ALIGNED(x, a) (((x) & ((a) - 1)) == 0)
#define ISP2(x) P2ALIGNED(x, x)
#define P2ALIGN(x, a) ((x) & -(a))
@@ -111,19 +107,6 @@
#define KMEM_ALIGN_MIN 8
/*
- * Minimum number of buffers per slab.
- *
- * This value is ignored when the slab size exceeds a threshold.
- */
-#define KMEM_MIN_BUFS_PER_SLAB 8
-
-/*
- * Special slab size beyond which the minimum number of buffers per slab is
- * ignored when computing the slab size of a cache.
- */
-#define KMEM_SLAB_SIZE_THRESHOLD (8 * PAGE_SIZE)
-
-/*
* Special buffer size under which slab data is unconditionnally allocated
* from its associated slab.
*/
@@ -163,11 +146,6 @@
#define KMEM_REDZONE_BYTE 0xbb
/*
- * Size of the VM submap from which default backend functions allocate.
- */
-#define KMEM_MAP_SIZE (128 * 1024 * 1024)
-
-/*
* Shift for the first kalloc cache size.
*/
#define KALLOC_FIRST_SHIFT 5
@@ -217,11 +195,17 @@
*
* The flags don't change once set and can be tested without locking.
*/
-#define KMEM_CF_NO_CPU_POOL 0x01 /* CPU pool layer disabled */
-#define KMEM_CF_SLAB_EXTERNAL 0x02 /* Slab data is off slab */
-#define KMEM_CF_NO_RECLAIM 0x04 /* Slabs are not reclaimable */
-#define KMEM_CF_VERIFY 0x08 /* Debugging facilities enabled */
-#define KMEM_CF_DIRECT 0x10 /* No buf-to-slab tree lookup */
+#define KMEM_CF_SLAB_EXTERNAL 0x01 /* Slab data is off slab */
+#define KMEM_CF_PHYSMEM 0x02 /* Allocate from physical memory */
+#define KMEM_CF_DIRECT 0x04 /* Direct buf-to-slab translation
+ (implies !KMEM_CF_SLAB_EXTERNAL) */
+#define KMEM_CF_USE_TREE 0x08 /* Use red-black tree to track slab
+ data */
+#define KMEM_CF_USE_PAGE 0x10 /* Use page private data to track slab
+ data (implies KMEM_CF_SLAB_EXTERNAL
+ and KMEM_CF_PHYSMEM) */
+#define KMEM_CF_VERIFY 0x20 /* Debugging facilities enabled
+ (implies KMEM_CF_USE_TREE) */
/*
* Options for kmem_cache_alloc_verify().
@@ -278,19 +262,13 @@ static unsigned int kmem_nr_caches;
static simple_lock_data_t __attribute__((used)) kmem_cache_list_lock;
/*
- * VM submap for slab caches.
- */
-static struct vm_map kmem_map_store;
-vm_map_t kmem_map = &kmem_map_store;
-
-/*
* Time of the last memory reclaim, in clock ticks.
*/
static unsigned long kmem_gc_last_tick;
#define kmem_error(format, ...) \
- printf("mem: error: %s(): " format "\n", __func__, \
- ## __VA_ARGS__)
+ panic("mem: error: %s(): " format "\n", __func__, \
+ ## __VA_ARGS__)
#define kmem_warn(format, ...) \
printf("mem: warning: %s(): " format "\n", __func__, \
@@ -384,12 +362,49 @@ static inline void * kmem_bufctl_to_buf(union kmem_bufctl *bufctl,
return (void *)bufctl - cache->bufctl_dist;
}
-static vm_offset_t kmem_pagealloc(vm_size_t size)
+static vm_offset_t
+kmem_pagealloc_physmem(vm_size_t size)
+{
+ struct vm_page *page;
+
+ assert(size == PAGE_SIZE);
+
+ for (;;) {
+ page = vm_page_grab_contig(size, VM_PAGE_SEL_DIRECTMAP);
+
+ if (page != NULL)
+ break;
+
+ VM_PAGE_WAIT(NULL);
+ }
+
+ return phystokv(vm_page_to_pa(page));
+}
+
+static void
+kmem_pagefree_physmem(vm_offset_t addr, vm_size_t size)
+{
+ struct vm_page *page;
+
+ assert(size == PAGE_SIZE);
+ page = vm_page_lookup_pa(kvtophys(addr));
+ assert(page != NULL);
+ vm_page_free_contig(page, size);
+}
+
+static vm_offset_t
+kmem_pagealloc_virtual(vm_size_t size, vm_size_t align)
{
vm_offset_t addr;
kern_return_t kr;
- kr = kmem_alloc_wired(kmem_map, &addr, size);
+ assert(size > PAGE_SIZE);
+ size = vm_page_round(size);
+
+ if (align <= PAGE_SIZE)
+ kr = kmem_alloc_wired(kernel_map, &addr, size);
+ else
+ kr = kmem_alloc_aligned(kernel_map, &addr, size);
if (kr != KERN_SUCCESS)
return 0;
@@ -397,9 +412,29 @@ static vm_offset_t kmem_pagealloc(vm_size_t size)
return addr;
}
-static void kmem_pagefree(vm_offset_t ptr, vm_size_t size)
+static void
+kmem_pagefree_virtual(vm_offset_t addr, vm_size_t size)
{
- kmem_free(kmem_map, ptr, size);
+ assert(size > PAGE_SIZE);
+ size = vm_page_round(size);
+ kmem_free(kernel_map, addr, size);
+}
+
+static vm_offset_t
+kmem_pagealloc(vm_size_t size, vm_size_t align, int flags)
+{
+ assert(align <= size);
+ return (flags & KMEM_CF_PHYSMEM)
+ ? kmem_pagealloc_physmem(size)
+ : kmem_pagealloc_virtual(size, align);
+}
+
+static void
+kmem_pagefree(vm_offset_t addr, vm_size_t size, int flags)
+{
+ return (flags & KMEM_CF_PHYSMEM)
+ ? kmem_pagefree_physmem(addr, size)
+ : kmem_pagefree_virtual(addr, size);
}
static void kmem_slab_create_verify(struct kmem_slab *slab,
@@ -434,28 +469,28 @@ static struct kmem_slab * kmem_slab_create(struct kmem_cache *cache,
union kmem_bufctl *bufctl;
size_t buf_size;
unsigned long buffers;
- void *slab_buf;
+ vm_offset_t slab_buf;
- if (cache->slab_alloc_fn == NULL)
- slab_buf = (void *)kmem_pagealloc(cache->slab_size);
- else
- slab_buf = (void *)cache->slab_alloc_fn(cache->slab_size);
+ slab_buf = kmem_pagealloc(cache->slab_size, cache->align, cache->flags);
- if (slab_buf == NULL)
+ if (slab_buf == 0)
return NULL;
if (cache->flags & KMEM_CF_SLAB_EXTERNAL) {
- assert(!(cache->flags & KMEM_CF_NO_RECLAIM));
slab = (struct kmem_slab *)kmem_cache_alloc(&kmem_slab_cache);
if (slab == NULL) {
- if (cache->slab_free_fn == NULL)
- kmem_pagefree((vm_offset_t)slab_buf, cache->slab_size);
- else
- cache->slab_free_fn((vm_offset_t)slab_buf, cache->slab_size);
-
+ kmem_pagefree(slab_buf, cache->slab_size, cache->flags);
return NULL;
}
+
+ if (cache->flags & KMEM_CF_USE_PAGE) {
+ struct vm_page *page;
+
+ page = vm_page_lookup_pa(kvtophys(slab_buf));
+ assert(page != NULL);
+ vm_page_set_priv(page, slab);
+ }
} else {
slab = (struct kmem_slab *)(slab_buf + cache->slab_size) - 1;
}
@@ -464,7 +499,7 @@ static struct kmem_slab * kmem_slab_create(struct kmem_cache *cache,
rbtree_node_init(&slab->tree_node);
slab->nr_refs = 0;
slab->first_free = NULL;
- slab->addr = slab_buf + color;
+ slab->addr = (void *)(slab_buf + color);
buf_size = cache->buf_size;
bufctl = kmem_buf_to_bufctl(slab->addr, cache);
@@ -518,25 +553,26 @@ static void kmem_slab_destroy(struct kmem_slab *slab, struct kmem_cache *cache)
assert(slab->nr_refs == 0);
assert(slab->first_free != NULL);
- assert(!(cache->flags & KMEM_CF_NO_RECLAIM));
if (cache->flags & KMEM_CF_VERIFY)
kmem_slab_destroy_verify(slab, cache);
slab_buf = (vm_offset_t)P2ALIGN((unsigned long)slab->addr, PAGE_SIZE);
- if (cache->slab_free_fn == NULL)
- kmem_pagefree(slab_buf, cache->slab_size);
- else
- cache->slab_free_fn(slab_buf, cache->slab_size);
+ if (cache->flags & KMEM_CF_SLAB_EXTERNAL) {
+ if (cache->flags & KMEM_CF_USE_PAGE) {
+ struct vm_page *page;
+
+ /* Not strictly needed, but let's increase safety */
+ page = vm_page_lookup_pa(kvtophys(slab_buf));
+ assert(page != NULL);
+ vm_page_set_priv(page, NULL);
+ }
- if (cache->flags & KMEM_CF_SLAB_EXTERNAL)
kmem_cache_free(&kmem_slab_cache, (vm_offset_t)slab);
-}
+ }
-static inline int kmem_slab_use_tree(int flags)
-{
- return !(flags & KMEM_CF_DIRECT) || (flags & KMEM_CF_VERIFY);
+ kmem_pagefree(slab_buf, cache->slab_size, cache->flags);
}
static inline int kmem_slab_cmp_lookup(const void *addr,
@@ -662,7 +698,7 @@ static void kmem_cache_error(struct kmem_cache *cache, void *buf, int error,
{
struct kmem_buftag *buftag;
- kmem_error("cache: %s, buffer: %p", cache->name, (void *)buf);
+ kmem_warn("cache: %s, buffer: %p", cache->name, (void *)buf);
switch(error) {
case KMEM_ERR_INVALID:
@@ -694,81 +730,81 @@ static void kmem_cache_error(struct kmem_cache *cache, void *buf, int error,
}
/*
- * Compute an appropriate slab size for the given cache.
+ * Compute properties such as slab size for the given cache.
*
* Once the slab size is known, this function sets the related properties
- * (buffers per slab and maximum color). It can also set the KMEM_CF_DIRECT
- * and/or KMEM_CF_SLAB_EXTERNAL flags depending on the resulting layout.
+ * (buffers per slab and maximum color). It can also set some KMEM_CF_xxx
+ * flags depending on the resulting layout.
*/
-static void kmem_cache_compute_sizes(struct kmem_cache *cache, int flags)
+static void kmem_cache_compute_properties(struct kmem_cache *cache, int flags)
{
- size_t i, buffers, buf_size, slab_size, free_slab_size, optimal_size;
- size_t waste, waste_min;
- int embed, optimal_embed = optimal_embed;
+ size_t size, waste;
+ int embed;
- buf_size = cache->buf_size;
-
- if (buf_size < KMEM_BUF_SIZE_THRESHOLD)
+ if (cache->buf_size < KMEM_BUF_SIZE_THRESHOLD)
flags |= KMEM_CACHE_NOOFFSLAB;
- i = 0;
- waste_min = (size_t)-1;
-
- do {
- i++;
- slab_size = P2ROUND(i * buf_size, PAGE_SIZE);
- free_slab_size = slab_size;
+ cache->slab_size = PAGE_SIZE;
+ for (;;) {
if (flags & KMEM_CACHE_NOOFFSLAB)
- free_slab_size -= sizeof(struct kmem_slab);
-
- buffers = free_slab_size / buf_size;
- waste = free_slab_size % buf_size;
-
- if (buffers > i)
- i = buffers;
-
- if (flags & KMEM_CACHE_NOOFFSLAB)
- embed = 1;
- else if (sizeof(struct kmem_slab) <= waste) {
embed = 1;
- waste -= sizeof(struct kmem_slab);
- } else {
- embed = 0;
+ else {
+ waste = cache->slab_size % cache->buf_size;
+ embed = (sizeof(struct kmem_slab) <= waste);
}
- if (waste <= waste_min) {
- waste_min = waste;
- optimal_size = slab_size;
- optimal_embed = embed;
- }
- } while ((buffers < KMEM_MIN_BUFS_PER_SLAB)
- && (slab_size < KMEM_SLAB_SIZE_THRESHOLD));
+ size = cache->slab_size;
- assert(!(flags & KMEM_CACHE_NOOFFSLAB) || optimal_embed);
+ if (embed)
+ size -= sizeof(struct kmem_slab);
- cache->slab_size = optimal_size;
- slab_size = cache->slab_size - (optimal_embed
- ? sizeof(struct kmem_slab)
- : 0);
- cache->bufs_per_slab = slab_size / buf_size;
- cache->color_max = slab_size % buf_size;
+ if (size >= cache->buf_size)
+ break;
+
+ cache->slab_size += PAGE_SIZE;
+ }
+
+ cache->bufs_per_slab = size / cache->buf_size;
+ cache->color_max = size % cache->buf_size;
if (cache->color_max >= PAGE_SIZE)
- cache->color_max = PAGE_SIZE - 1;
+ cache->color_max = 0;
- if (optimal_embed) {
+ if (!embed)
+ cache->flags |= KMEM_CF_SLAB_EXTERNAL;
+
+ if ((flags & KMEM_CACHE_PHYSMEM) || (cache->slab_size == PAGE_SIZE)) {
+ cache->flags |= KMEM_CF_PHYSMEM;
+
+ /*
+ * Avoid using larger-than-page slabs backed by the direct physical
+ * mapping to completely prevent physical memory fragmentation from
+ * making slab allocations fail.
+ */
+ if (cache->slab_size != PAGE_SIZE)
+ panic("slab: invalid cache parameters");
+ }
+
+ if (cache->flags & KMEM_CF_VERIFY)
+ cache->flags |= KMEM_CF_USE_TREE;
+
+ if (cache->flags & KMEM_CF_SLAB_EXTERNAL) {
+ if (cache->flags & KMEM_CF_PHYSMEM)
+ cache->flags |= KMEM_CF_USE_PAGE;
+ else
+ cache->flags |= KMEM_CF_USE_TREE;
+ } else {
if (cache->slab_size == PAGE_SIZE)
cache->flags |= KMEM_CF_DIRECT;
- } else {
- cache->flags |= KMEM_CF_SLAB_EXTERNAL;
+ else
+ cache->flags |= KMEM_CF_USE_TREE;
}
}
void kmem_cache_init(struct kmem_cache *cache, const char *name,
- size_t obj_size, size_t align, kmem_cache_ctor_t ctor,
- kmem_slab_alloc_fn_t slab_alloc_fn,
- kmem_slab_free_fn_t slab_free_fn, int flags)
+ size_t obj_size, size_t align,
+ kmem_cache_ctor_t ctor, int flags)
{
#if SLAB_USE_CPU_POOLS
struct kmem_cpu_pool_type *cpu_pool_type;
@@ -782,15 +818,6 @@ void kmem_cache_init(struct kmem_cache *cache, const char *name,
cache->flags = 0;
#endif /* SLAB_VERIFY */
- if (flags & KMEM_CACHE_NOCPUPOOL)
- cache->flags |= KMEM_CF_NO_CPU_POOL;
-
- if (flags & KMEM_CACHE_NORECLAIM) {
- assert(slab_free_fn == NULL);
- flags |= KMEM_CACHE_NOOFFSLAB;
- cache->flags |= KMEM_CF_NO_RECLAIM;
- }
-
if (flags & KMEM_CACHE_VERIFY)
cache->flags |= KMEM_CF_VERIFY;
@@ -799,7 +826,6 @@ void kmem_cache_init(struct kmem_cache *cache, const char *name,
assert(obj_size > 0);
assert(ISP2(align));
- assert(align < PAGE_SIZE);
buf_size = P2ROUND(obj_size, align);
@@ -818,8 +844,6 @@ void kmem_cache_init(struct kmem_cache *cache, const char *name,
cache->nr_slabs = 0;
cache->nr_free_slabs = 0;
cache->ctor = ctor;
- cache->slab_alloc_fn = slab_alloc_fn;
- cache->slab_free_fn = slab_free_fn;
strncpy(cache->name, name, sizeof(cache->name));
cache->name[sizeof(cache->name) - 1] = '\0';
cache->buftag_dist = 0;
@@ -834,7 +858,7 @@ void kmem_cache_init(struct kmem_cache *cache, const char *name,
cache->buf_size = buf_size;
}
- kmem_cache_compute_sizes(cache, flags);
+ kmem_cache_compute_properties(cache, flags);
#if SLAB_USE_CPU_POOLS
for (cpu_pool_type = kmem_cpu_pool_types;
@@ -907,9 +931,6 @@ static void kmem_cache_reap(struct kmem_cache *cache)
struct list dead_slabs;
unsigned long nr_free_slabs;
- if (cache->flags & KMEM_CF_NO_RECLAIM)
- return;
-
simple_lock(&cache->lock);
list_set_head(&dead_slabs, &cache->free_slabs);
list_init(&cache->free_slabs);
@@ -970,7 +991,7 @@ static void * kmem_cache_alloc_from_slab(struct kmem_cache *cache)
cache->nr_free_slabs--;
}
- if ((slab->nr_refs == 1) && kmem_slab_use_tree(cache->flags))
+ if ((slab->nr_refs == 1) && (cache->flags & KMEM_CF_USE_TREE))
rbtree_insert(&cache->active_slabs, &slab->tree_node,
kmem_slab_cmp_insert);
@@ -991,17 +1012,26 @@ static void kmem_cache_free_to_slab(struct kmem_cache *cache, void *buf)
assert(cache->slab_size == PAGE_SIZE);
slab = (struct kmem_slab *)P2END((unsigned long)buf, cache->slab_size)
- 1;
+ } else if (cache->flags & KMEM_CF_USE_PAGE) {
+ struct vm_page *page;
+
+ page = vm_page_lookup_pa(kvtophys((vm_offset_t)buf));
+ assert(page != NULL);
+ slab = vm_page_get_priv(page);
} else {
struct rbtree_node *node;
+ assert(cache->flags & KMEM_CF_USE_TREE);
node = rbtree_lookup_nearest(&cache->active_slabs, buf,
kmem_slab_cmp_lookup, RBTREE_LEFT);
assert(node != NULL);
slab = rbtree_entry(node, struct kmem_slab, tree_node);
- assert((unsigned long)buf < (P2ALIGN((unsigned long)slab->addr
- + cache->slab_size, PAGE_SIZE)));
}
+ assert((unsigned long)buf >= (unsigned long)slab->addr);
+ assert(((unsigned long)buf + cache->buf_size)
+ <= vm_page_trunc((unsigned long)slab->addr + cache->slab_size));
+
assert(slab->nr_refs >= 1);
assert(slab->nr_refs <= cache->bufs_per_slab);
bufctl = kmem_buf_to_bufctl(buf, cache);
@@ -1013,7 +1043,7 @@ static void kmem_cache_free_to_slab(struct kmem_cache *cache, void *buf)
if (slab->nr_refs == 0) {
/* The slab has become free */
- if (kmem_slab_use_tree(cache->flags))
+ if (cache->flags & KMEM_CF_USE_TREE)
rbtree_remove(&cache->active_slabs, &slab->tree_node);
if (cache->bufs_per_slab > 1)
@@ -1134,6 +1164,8 @@ static void kmem_cache_free_verify(struct kmem_cache *cache, void *buf)
unsigned char *redzone_byte;
unsigned long slabend;
+ assert(cache->flags & KMEM_CF_USE_TREE);
+
simple_lock(&cache->lock);
node = rbtree_lookup_nearest(&cache->active_slabs, buf,
kmem_slab_cmp_lookup, RBTREE_LEFT);
@@ -1279,16 +1311,12 @@ void slab_bootstrap(void)
void slab_init(void)
{
- vm_offset_t min, max;
-
#if SLAB_USE_CPU_POOLS
struct kmem_cpu_pool_type *cpu_pool_type;
char name[KMEM_CACHE_NAME_SIZE];
size_t i, size;
#endif /* SLAB_USE_CPU_POOLS */
- kmem_submap(kmem_map, kernel_map, &min, &max, KMEM_MAP_SIZE, FALSE);
-
#if SLAB_USE_CPU_POOLS
for (i = 0; i < ARRAY_SIZE(kmem_cpu_pool_types); i++) {
cpu_pool_type = &kmem_cpu_pool_types[i];
@@ -1296,7 +1324,7 @@ void slab_init(void)
sprintf(name, "kmem_cpu_array_%d", cpu_pool_type->array_size);
size = sizeof(void *) * cpu_pool_type->array_size;
kmem_cache_init(cpu_pool_type->array_cache, name, size,
- cpu_pool_type->array_align, NULL, NULL, NULL, 0);
+ cpu_pool_type->array_align, NULL, 0);
}
#endif /* SLAB_USE_CPU_POOLS */
@@ -1304,25 +1332,7 @@ void slab_init(void)
* Prevent off slab data for the slab cache to avoid infinite recursion.
*/
kmem_cache_init(&kmem_slab_cache, "kmem_slab", sizeof(struct kmem_slab),
- 0, NULL, NULL, NULL, KMEM_CACHE_NOOFFSLAB);
-}
-
-static vm_offset_t kalloc_pagealloc(vm_size_t size)
-{
- vm_offset_t addr;
- kern_return_t kr;
-
- kr = kmem_alloc_wired(kmem_map, &addr, size);
-
- if (kr != KERN_SUCCESS)
- return 0;
-
- return addr;
-}
-
-static void kalloc_pagefree(vm_offset_t ptr, vm_size_t size)
-{
- kmem_free(kmem_map, ptr, size);
+ 0, NULL, KMEM_CACHE_NOOFFSLAB);
}
void kalloc_init(void)
@@ -1334,8 +1344,7 @@ void kalloc_init(void)
for (i = 0; i < ARRAY_SIZE(kalloc_caches); i++) {
sprintf(name, "kalloc_%lu", size);
- kmem_cache_init(&kalloc_caches[i], name, size, 0, NULL,
- kalloc_pagealloc, kalloc_pagefree, 0);
+ kmem_cache_init(&kalloc_caches[i], name, size, 0, NULL, 0);
size <<= 1;
}
}
@@ -1386,8 +1395,9 @@ vm_offset_t kalloc(vm_size_t size)
if ((buf != 0) && (cache->flags & KMEM_CF_VERIFY))
kalloc_verify(cache, buf, size);
- } else
- buf = (void *)kalloc_pagealloc(size);
+ } else {
+ buf = (void *)kmem_pagealloc_virtual(size, 0);
+ }
return (vm_offset_t)buf;
}
@@ -1428,19 +1438,22 @@ void kfree(vm_offset_t data, vm_size_t size)
kmem_cache_free(cache, data);
} else {
- kalloc_pagefree(data, size);
+ kmem_pagefree_virtual(data, size);
}
}
-void slab_info(void)
+static void _slab_info(int (printx)(const char *fmt, ...))
{
struct kmem_cache *cache;
- vm_size_t mem_usage, mem_reclaimable;
+ vm_size_t mem_usage, mem_reclaimable, mem_total, mem_total_reclaimable;
- printf("cache obj slab bufs objs bufs "
- " total reclaimable\n"
- "name size size /slab usage count "
- " memory memory\n");
+ mem_total = 0;
+ mem_total_reclaimable = 0;
+
+ printx("cache obj slab bufs objs bufs"
+ " total reclaimable\n"
+ "name flags size size /slab usage count"
+ " memory memory\n");
simple_lock(&kmem_cache_list_lock);
@@ -1450,17 +1463,39 @@ void slab_info(void)
mem_usage = (cache->nr_slabs * cache->slab_size) >> 10;
mem_reclaimable = (cache->nr_free_slabs * cache->slab_size) >> 10;
- printf("%-19s %6lu %3luk %4lu %6lu %6lu %7uk %10uk\n",
- cache->name, cache->obj_size, cache->slab_size >> 10,
+ printx("%-20s %04x %7lu %3luk %4lu %6lu %6lu %7uk %10uk\n",
+ cache->name, cache->flags, cache->obj_size,
+ cache->slab_size >> 10,
cache->bufs_per_slab, cache->nr_objs, cache->nr_bufs,
mem_usage, mem_reclaimable);
simple_unlock(&cache->lock);
+
+ mem_total += mem_usage;
+ mem_total_reclaimable += mem_reclaimable;
}
simple_unlock(&kmem_cache_list_lock);
+
+ printx("total: %uk, reclaimable: %uk\n",
+ mem_total, mem_total_reclaimable);
}
+void slab_info(void)
+{
+ _slab_info(printf);
+}
+
+#if MACH_KDB
+#include <ddb/db_output.h>
+
+ void db_show_slab_info(void)
+{
+ _slab_info(db_printf);
+}
+
+#endif /* MACH_KDB */
+
#if MACH_DEBUG
kern_return_t host_slab_info(host_t host, cache_info_array_t *infop,
unsigned int *infoCntp)
@@ -1468,7 +1503,7 @@ kern_return_t host_slab_info(host_t host, cache_info_array_t *infop,
struct kmem_cache *cache;
cache_info_t *info;
unsigned int i, nr_caches;
- vm_size_t info_size = info_size;
+ vm_size_t info_size = 0;
kern_return_t kr;
if (host == HOST_NULL)
@@ -1502,17 +1537,8 @@ kern_return_t host_slab_info(host_t host, cache_info_array_t *infop,
i = 0;
list_for_each_entry(&kmem_cache_list, cache, node) {
- simple_lock(&cache_lock);
- info[i].flags = ((cache->flags & KMEM_CF_NO_CPU_POOL)
- ? CACHE_FLAGS_NO_CPU_POOL : 0)
- | ((cache->flags & KMEM_CF_SLAB_EXTERNAL)
- ? CACHE_FLAGS_SLAB_EXTERNAL : 0)
- | ((cache->flags & KMEM_CF_NO_RECLAIM)
- ? CACHE_FLAGS_NO_RECLAIM : 0)
- | ((cache->flags & KMEM_CF_VERIFY)
- ? CACHE_FLAGS_VERIFY : 0)
- | ((cache->flags & KMEM_CF_DIRECT)
- ? CACHE_FLAGS_DIRECT : 0);
+ simple_lock(&cache->lock);
+ info[i].flags = cache->flags;
#if SLAB_USE_CPU_POOLS
info[i].cpu_pool_size = cache->cpu_pool_type->array_size;
#else /* SLAB_USE_CPU_POOLS */
diff --git a/kern/slab.h b/kern/slab.h
index b842fb74..8527c9db 100644
--- a/kern/slab.h
+++ b/kern/slab.h
@@ -47,6 +47,8 @@
#ifndef _KERN_SLAB_H
#define _KERN_SLAB_H
+#include <cache.h>
+#include <kern/cpu_number.h>
#include <kern/lock.h>
#include <kern/list.h>
#include <kern/rbtree.h>
@@ -55,10 +57,6 @@
#include <vm/vm_types.h>
#if SLAB_USE_CPU_POOLS
-/*
- * L1 cache line size.
- */
-#define CPU_L1_SIZE (1 << CPU_L1_SHIFT)
/*
* Per-processor cache of pre-constructed objects.
@@ -139,22 +137,20 @@ struct kmem_slab {
typedef void (*kmem_cache_ctor_t)(void *obj);
/*
- * Types for slab allocation/free functions.
- *
- * All addresses and sizes must be page-aligned.
- */
-typedef vm_offset_t (*kmem_slab_alloc_fn_t)(vm_size_t);
-typedef void (*kmem_slab_free_fn_t)(vm_offset_t, vm_size_t);
-
-/*
- * Cache name buffer size.
+ * Cache name buffer size. The size is chosen so that struct
+ * kmem_cache fits into two cache lines. The size of a cache line on
+ * a typical CPU is 64 bytes.
*/
-#define KMEM_CACHE_NAME_SIZE 32
+#define KMEM_CACHE_NAME_SIZE 24
/*
* Cache of objects.
*
* Locking order : cpu_pool -> cache. CPU pools locking is ordered by CPU ID.
+ *
+ * Currently, SLAB_USE_CPU_POOLS is not defined. KMEM_CACHE_NAME_SIZE
+ * is chosen so that the struct fits into two cache lines. The first
+ * cache line contains all hot fields.
*/
struct kmem_cache {
#if SLAB_USE_CPU_POOLS
@@ -170,25 +166,25 @@ struct kmem_cache {
struct list free_slabs;
struct rbtree active_slabs;
int flags;
+ size_t bufctl_dist; /* Distance from buffer to bufctl */
+ size_t slab_size;
+ unsigned long bufs_per_slab;
+ unsigned long nr_objs; /* Number of allocated objects */
+ unsigned long nr_free_slabs;
+ kmem_cache_ctor_t ctor;
+ /* All fields below are cold */
size_t obj_size; /* User-provided size */
+ /* Assuming ! SLAB_USE_CPU_POOLS, here is the cacheline boundary */
size_t align;
size_t buf_size; /* Aligned object size */
- size_t bufctl_dist; /* Distance from buffer to bufctl */
- size_t slab_size;
size_t color;
size_t color_max;
- unsigned long bufs_per_slab;
- unsigned long nr_objs; /* Number of allocated objects */
unsigned long nr_bufs; /* Total number of buffers */
unsigned long nr_slabs;
- unsigned long nr_free_slabs;
- kmem_cache_ctor_t ctor;
- kmem_slab_alloc_fn_t slab_alloc_fn;
- kmem_slab_free_fn_t slab_free_fn;
char name[KMEM_CACHE_NAME_SIZE];
size_t buftag_dist; /* Distance from buffer to buftag */
size_t redzone_pad; /* Bytes from end of object to redzone word */
-};
+} __cacheline_aligned;
/*
* Mach-style declarations for struct kmem_cache.
@@ -197,26 +193,18 @@ typedef struct kmem_cache *kmem_cache_t;
#define KMEM_CACHE_NULL ((kmem_cache_t) 0)
/*
- * VM submap for slab allocations.
- */
-extern vm_map_t kmem_map;
-
-/*
* Cache initialization flags.
*/
-#define KMEM_CACHE_NOCPUPOOL 0x1 /* Don't use the per-cpu pools */
-#define KMEM_CACHE_NOOFFSLAB 0x2 /* Don't allocate external slab data */
-#define KMEM_CACHE_NORECLAIM 0x4 /* Never give slabs back to their source,
- implies KMEM_CACHE_NOOFFSLAB */
-#define KMEM_CACHE_VERIFY 0x8 /* Use debugging facilities */
+#define KMEM_CACHE_NOOFFSLAB 0x1 /* Don't allocate external slab data */
+#define KMEM_CACHE_PHYSMEM 0x2 /* Allocate from physical memory */
+#define KMEM_CACHE_VERIFY 0x4 /* Use debugging facilities */
/*
* Initialize a cache.
*/
void kmem_cache_init(struct kmem_cache *cache, const char *name,
- size_t obj_size, size_t align, kmem_cache_ctor_t ctor,
- kmem_slab_alloc_fn_t slab_alloc_fn,
- kmem_slab_free_fn_t slab_free_fn, int flags);
+ size_t obj_size, size_t align,
+ kmem_cache_ctor_t ctor, int flags);
/*
* Allocate an object from a cache.
@@ -244,4 +232,8 @@ void slab_collect(void);
*/
void slab_info(void);
+#if MACH_KDB
+void db_show_slab_info(void);
+#endif /* MACH_KDB */
+
#endif /* _KERN_SLAB_H */
diff --git a/kern/startup.c b/kern/startup.c
index 6dced433..bd296943 100644
--- a/kern/startup.c
+++ b/kern/startup.c
@@ -39,51 +39,41 @@
#include <kern/machine.h>
#include <kern/mach_factor.h>
#include <kern/mach_clock.h>
-#include <kern/printf.h>
#include <kern/processor.h>
+#include <kern/rdxtree.h>
#include <kern/sched_prim.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_swap.h>
#include <kern/timer.h>
#include <kern/xpr.h>
+#include <kern/bootstrap.h>
#include <kern/time_stamp.h>
+#include <kern/startup.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
+#include <vm/vm_init.h>
+#include <vm/vm_pageout.h>
#include <machine/machspl.h>
#include <machine/pcb.h>
#include <machine/pmap.h>
#include <machine/model_dep.h>
#include <mach/version.h>
+#include <device/device_init.h>
-
-
-extern void vm_mem_init();
-extern void vm_mem_bootstrap();
-extern void init_timeout();
-extern void machine_init();
-
-extern void idle_thread();
-extern void vm_pageout();
-extern void reaper_thread();
-extern void swapin_thread();
-extern void sched_thread();
-
-extern void bootstrap_create();
-extern void device_service_create();
-
-void cpu_launch_first_thread(); /* forward */
-void start_kernel_threads(); /* forward */
+#if MACH_KDB
+#include <device/cons.h>
+#endif /* MACH_KDB */
#if ! MACH_KBD
-boolean_t reboot_on_panic = 1;
+boolean_t reboot_on_panic = TRUE;
#endif
#if NCPUS > 1
-extern void start_other_cpus();
-extern void action_thread();
+#include <machine/mp_desc.h>
+#include <kern/machine.h>
#endif /* NCPUS > 1 */
/* XX */
@@ -96,7 +86,7 @@ extern char *kernel_cmdline;
*
* Assumes that master_cpu is set.
*/
-void setup_main()
+void setup_main(void)
{
thread_t startup_thread;
@@ -113,15 +103,15 @@ void setup_main()
}
#else /* MACH_KDB */
if (strstr (kernel_cmdline, "-H ")) {
- reboot_on_panic = 0;
+ reboot_on_panic = FALSE;
}
#endif /* MACH_KDB */
panic_init();
- printf_init();
sched_init();
vm_mem_bootstrap();
+ rdxtree_cache_init();
ipc_bootstrap();
vm_mem_init();
ipc_init();
@@ -146,7 +136,7 @@ void setup_main()
mapable_time_init();
machine_info.max_cpus = NCPUS;
- machine_info.memory_size = phys_last_addr - phys_first_addr; /* XXX mem_size */
+ machine_info.memory_size = vm_page_mem_size(); /* XXX phys_addr_t -> vm_size_t */
machine_info.avail_cpus = 0;
machine_info.major_version = KERNEL_MAJOR_VERSION;
machine_info.minor_version = KERNEL_MINOR_VERSION;
@@ -165,7 +155,7 @@ void setup_main()
* Kick off the time-out driven routines by calling
* them the first time.
*/
- recompute_priorities();
+ recompute_priorities(NULL);
compute_mach_factor();
/*
@@ -208,9 +198,9 @@ void setup_main()
* Now running in a thread. Create the rest of the kernel threads
* and the bootstrap task.
*/
-void start_kernel_threads()
+void start_kernel_threads(void)
{
- register int i;
+ int i;
/*
* Create the idle threads and the other
@@ -276,7 +266,7 @@ void start_kernel_threads()
}
#if NCPUS > 1
-void slave_main()
+void slave_main(void)
{
cpu_launch_first_thread(THREAD_NULL);
}
@@ -286,10 +276,9 @@ void slave_main()
* Start up the first thread on a CPU.
* First thread is specified for the master CPU.
*/
-void cpu_launch_first_thread(th)
- register thread_t th;
+void cpu_launch_first_thread(thread_t th)
{
- register int mycpu;
+ int mycpu;
mycpu = cpu_number();
diff --git a/kern/startup.h b/kern/startup.h
new file mode 100644
index 00000000..d924d154
--- /dev/null
+++ b/kern/startup.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _KERN_STARTUP_H_
+#define _KERN_STARTUP_H_
+
+#include <kern/thread.h>
+
+extern void setup_main(void);
+void cpu_launch_first_thread(thread_t th);
+void start_kernel_threads(void);
+
+#endif /* _KERN_STARTUP_H_ */
diff --git a/kern/strings.c b/kern/strings.c
index 3676f98e..e299534a 100644
--- a/kern/strings.c
+++ b/kern/strings.c
@@ -50,15 +50,15 @@
* the contents are identical up to the length of s2.
* It returns < 0 if the first differing character is smaller
* in s1 than in s2 or if s1 is shorter than s2 and the
- * contents are identical upto the length of s1.
+ * contents are identical up to the length of s1.
*/
-int
+int __attribute__ ((pure))
strcmp(
- register const char *s1,
- register const char *s2)
+ const char *s1,
+ const char *s2)
{
- register unsigned int a, b;
+ unsigned int a, b;
do {
a = *s1++;
@@ -80,13 +80,13 @@ strcmp(
* comparison runs for at most "n" characters.
*/
-int
+int __attribute__ ((pure))
strncmp(
- register const char *s1,
- register const char *s2,
+ const char *s1,
+ const char *s2,
size_t n)
{
- register unsigned int a, b;
+ unsigned int a, b;
while (n != 0) {
a = *s1++;
@@ -113,10 +113,10 @@ strncmp(
char *
strcpy(
- register char *to,
- register const char *from)
+ char *to,
+ const char *from)
{
- register char *ret = to;
+ char *ret = to;
while ((*to++ = *from++) != '\0')
continue;
@@ -135,11 +135,11 @@ strcpy(
char *
strncpy(
- register char *to,
- register const char *from,
- register size_t count)
+ char *to,
+ const char *from,
+ size_t count)
{
- register char *ret = to;
+ char *ret = to;
while (count != 0) {
count--;
@@ -157,15 +157,15 @@ strncpy(
/*
* Abstract:
- * strlen returns the number of characters in "string" preceeding
+ * strlen returns the number of characters in "string" preceding
* the terminating null character.
*/
-size_t
+size_t __attribute__ ((pure))
strlen(
- register const char *string)
+ const char *string)
{
- register const char *ret = string;
+ const char *ret = string;
while (*string++ != '\0')
continue;
diff --git a/kern/syscall_emulation.c b/kern/syscall_emulation.c
index c1c3096c..95e91d55 100644
--- a/kern/syscall_emulation.c
+++ b/kern/syscall_emulation.c
@@ -57,7 +57,7 @@
/*
* eml_init: initialize user space emulation code
*/
-void eml_init()
+void eml_init(void)
{
}
@@ -68,10 +68,11 @@ void eml_init()
* vector.
*/
-void eml_task_reference(task, parent)
- task_t task, parent;
+void eml_task_reference(
+ task_t task,
+ task_t parent)
{
- register eml_dispatch_t eml;
+ eml_dispatch_t eml;
if (parent == TASK_NULL)
eml = EML_DISPATCH_NULL;
@@ -94,9 +95,9 @@ void eml_task_reference(task, parent)
*/
void eml_task_deallocate(task)
- task_t task;
+ const task_t task;
{
- register eml_dispatch_t eml;
+ eml_dispatch_t eml;
eml = task->eml_dispatch;
if (eml != EML_DISPATCH_NULL) {
@@ -116,12 +117,11 @@ void eml_task_deallocate(task)
* set a list of emulated system calls for this task.
*/
kern_return_t
-task_set_emulation_vector_internal(task, vector_start, emulation_vector,
- emulation_vector_count)
- task_t task;
- int vector_start;
- emulation_vector_t emulation_vector;
- unsigned int emulation_vector_count;
+task_set_emulation_vector_internal(
+ task_t task,
+ int vector_start,
+ emulation_vector_t emulation_vector,
+ unsigned int emulation_vector_count)
{
eml_dispatch_t cur_eml, new_eml, old_eml;
vm_size_t new_size;
@@ -295,12 +295,11 @@ task_set_emulation_vector_internal(task, vector_start, emulation_vector,
* The list is out-of-line.
*/
kern_return_t
-task_set_emulation_vector(task, vector_start, emulation_vector,
- emulation_vector_count)
- task_t task;
- int vector_start;
- emulation_vector_t emulation_vector;
- unsigned int emulation_vector_count;
+task_set_emulation_vector(
+ task_t task,
+ int vector_start,
+ emulation_vector_t emulation_vector,
+ unsigned int emulation_vector_count)
{
kern_return_t kr;
vm_offset_t emul_vector_addr;
@@ -342,12 +341,11 @@ task_set_emulation_vector(task, vector_start, emulation_vector,
* List is returned out-of-line.
*/
kern_return_t
-task_get_emulation_vector(task, vector_start, emulation_vector,
- emulation_vector_count)
- task_t task;
- int *vector_start; /* out */
- emulation_vector_t *emulation_vector; /* out */
- unsigned int *emulation_vector_count; /* out */
+task_get_emulation_vector(
+ task_t task,
+ int *vector_start, /* out */
+ emulation_vector_t *emulation_vector, /* out */
+ unsigned int *emulation_vector_count) /* out */
{
eml_dispatch_t eml;
vm_size_t vector_size, size;
@@ -445,10 +443,10 @@ task_get_emulation_vector(task, vector_start, emulation_vector,
* task_set_emulation: [Server Entry]
* set up for user space emulation of syscalls within this task.
*/
-kern_return_t task_set_emulation(task, routine_entry_pt, routine_number)
- task_t task;
- vm_offset_t routine_entry_pt;
- int routine_number;
+kern_return_t task_set_emulation(
+ task_t task,
+ vm_offset_t routine_entry_pt,
+ int routine_number)
{
return task_set_emulation_vector_internal(task, routine_number,
&routine_entry_pt, 1);
diff --git a/kern/syscall_emulation.h b/kern/syscall_emulation.h
index 501b0a83..bf20e441 100644
--- a/kern/syscall_emulation.h
+++ b/kern/syscall_emulation.h
@@ -33,6 +33,7 @@
#ifndef __ASSEMBLER__
#include <mach/machine/vm_types.h>
#include <kern/lock.h>
+#include <kern/task.h>
typedef vm_offset_t eml_routine_t;
@@ -56,6 +57,11 @@ typedef vm_offset_t *emulation_vector_t; /* Variable-length array */
#define EML_MOD (err_kern|err_sub(2))
#define EML_BAD_TASK (EML_MOD|0x0001)
#define EML_BAD_CNT (EML_MOD|0x0002)
+
+extern void eml_init(void);
+extern void eml_task_reference(task_t task, task_t parent);
+extern void eml_task_deallocate(task_t task);
+
#endif /* __ASSEMBLER__ */
#endif /* _KERN_SYSCALL_EMULATION_H_ */
diff --git a/kern/syscall_subr.c b/kern/syscall_subr.c
index ae2d7d73..6d23462c 100644
--- a/kern/syscall_subr.c
+++ b/kern/syscall_subr.c
@@ -48,8 +48,6 @@
#include <mach/policy.h>
#endif /* MACH_FIXPRI */
-
-
/*
* swtch and swtch_pri both attempt to context switch (logic in
* thread_block no-ops the context switch if nothing would happen).
@@ -63,12 +61,9 @@
* returned, the thread should make one more check on the
* lock and then be a good citizen and really suspend.
*/
-
-void thread_depress_priority(thread_t, mach_msg_timeout_t);
-
void swtch_continue(void)
{
- register processor_t myprocessor;
+ processor_t myprocessor;
myprocessor = current_processor();
thread_syscall_return(myprocessor->runq.count > 0 ||
@@ -78,7 +73,7 @@ void swtch_continue(void)
boolean_t swtch(void)
{
- register processor_t myprocessor;
+ processor_t myprocessor;
#if NCPUS > 1
myprocessor = current_processor();
@@ -96,8 +91,8 @@ boolean_t swtch(void)
void swtch_pri_continue(void)
{
- register thread_t thread = current_thread();
- register processor_t myprocessor;
+ thread_t thread = current_thread();
+ processor_t myprocessor;
if (thread->depress_priority >= 0)
(void) thread_depress_abort(thread);
@@ -107,15 +102,10 @@ void swtch_pri_continue(void)
/*NOTREACHED*/
}
-boolean_t swtch_pri(pri)
- int pri;
+boolean_t swtch_pri(int pri)
{
- register thread_t thread = current_thread();
- register processor_t myprocessor;
-
-#ifdef lint
- pri++;
-#endif /* lint */
+ thread_t thread = current_thread();
+ processor_t myprocessor;
#if NCPUS > 1
myprocessor = current_processor();
@@ -142,7 +132,7 @@ boolean_t swtch_pri(pri)
void thread_switch_continue(void)
{
- register thread_t cur_thread = current_thread();
+ thread_t cur_thread = current_thread();
/*
* Restore depressed priority
@@ -161,13 +151,13 @@ void thread_switch_continue(void)
* Fixed priority threads that call this get what they asked for
* even if that violates priority order.
*/
-kern_return_t thread_switch(thread_name, option, option_time)
-mach_port_t thread_name;
-int option;
-mach_msg_timeout_t option_time;
+kern_return_t thread_switch(
+ mach_port_t thread_name,
+ int option,
+ mach_msg_timeout_t option_time)
{
- register thread_t cur_thread = current_thread();
- register processor_t myprocessor;
+ thread_t cur_thread = current_thread();
+ processor_t myprocessor;
ipc_port_t port;
/*
@@ -208,8 +198,8 @@ mach_msg_timeout_t option_time;
* Get corresponding thread.
*/
if (ip_active(port) && (ip_kotype(port) == IKOT_THREAD)) {
- register thread_t thread;
- register spl_t s;
+ thread_t thread;
+ spl_t s;
thread = (thread_t) port->ip_kobject;
/*
@@ -289,9 +279,9 @@ mach_msg_timeout_t option_time;
* of zero will result in no timeout being scheduled.
*/
void
-thread_depress_priority(thread, depress_time)
-register thread_t thread;
-mach_msg_timeout_t depress_time;
+thread_depress_priority(
+ thread_t thread,
+ mach_msg_timeout_t depress_time)
{
unsigned int ticks;
spl_t s;
@@ -312,8 +302,8 @@ mach_msg_timeout_t depress_time;
* sched_pri to their lowest possible values.
*/
thread->depress_priority = thread->priority;
- thread->priority = 31;
- thread->sched_pri = 31;
+ thread->priority = NRQS-1;
+ thread->sched_pri = NRQS-1;
if (ticks != 0)
set_timeout(&thread->depress_timer, ticks);
@@ -327,8 +317,7 @@ mach_msg_timeout_t depress_time;
* Timeout routine for priority depression.
*/
void
-thread_depress_timeout(thread)
-register thread_t thread;
+thread_depress_timeout(thread_t thread)
{
spl_t s;
@@ -356,8 +345,7 @@ register thread_t thread;
* Prematurely abort priority depression if there is one.
*/
kern_return_t
-thread_depress_abort(thread)
-register thread_t thread;
+thread_depress_abort(thread_t thread)
{
spl_t s;
diff --git a/kern/syscall_subr.h b/kern/syscall_subr.h
index a2e39205..b6b61ab2 100644
--- a/kern/syscall_subr.h
+++ b/kern/syscall_subr.h
@@ -37,5 +37,6 @@ extern int thread_switch(mach_port_t, int, mach_msg_timeout_t);
extern void thread_depress_timeout(thread_t);
extern kern_return_t thread_depress_abort(thread_t);
extern void mach_print(const char *);
+extern void thread_depress_priority(thread_t thread, mach_msg_timeout_t depress_time);
#endif /* _KERN_SYSCALL_SUBR_H_ */
diff --git a/kern/syscall_sw.c b/kern/syscall_sw.c
index 607d843e..a383e467 100644
--- a/kern/syscall_sw.c
+++ b/kern/syscall_sw.c
@@ -36,6 +36,8 @@
#include <mach/mach_traps.h>
#include <mach/message.h>
#include <kern/syscall_subr.h>
+#include <kern/ipc_mig.h>
+#include <kern/eventcount.h>
#include <ipc/mach_port.h>
@@ -56,41 +58,20 @@
* the positive numbers) are reserved for Unix.
*/
-int kern_invalid_debug = 0;
+boolean_t kern_invalid_debug = FALSE;
-mach_port_t null_port()
+mach_port_t null_port(void)
{
if (kern_invalid_debug) SoftDebugger("null_port mach trap");
return(MACH_PORT_NULL);
}
-kern_return_t kern_invalid()
+kern_return_t kern_invalid(void)
{
if (kern_invalid_debug) SoftDebugger("kern_invalid mach trap");
return(KERN_INVALID_ARGUMENT);
}
-extern kern_return_t syscall_vm_map();
-extern kern_return_t syscall_vm_allocate();
-extern kern_return_t syscall_vm_deallocate();
-
-extern kern_return_t syscall_task_create();
-extern kern_return_t syscall_task_terminate();
-extern kern_return_t syscall_task_suspend();
-extern kern_return_t syscall_task_set_special_port();
-
-extern kern_return_t syscall_mach_port_allocate();
-extern kern_return_t syscall_mach_port_deallocate();
-extern kern_return_t syscall_mach_port_insert_right();
-extern kern_return_t syscall_mach_port_allocate_name();
-
-extern kern_return_t syscall_thread_depress_abort();
-extern kern_return_t evc_wait();
-extern kern_return_t evc_wait_clear();
-
-extern kern_return_t syscall_device_write_request();
-extern kern_return_t syscall_device_writev_request();
-
mach_trap_t mach_trap_table[] = {
MACH_TRAP(kern_invalid, 0), /* 0 */ /* Unix */
MACH_TRAP(kern_invalid, 0), /* 1 */ /* Unix */
diff --git a/kern/syscall_sw.h b/kern/syscall_sw.h
index 87fc1bb5..1edf1c7f 100644
--- a/kern/syscall_sw.h
+++ b/kern/syscall_sw.h
@@ -37,15 +37,15 @@ typedef struct {
int mach_trap_arg_count;
int (*mach_trap_function)();
boolean_t mach_trap_stack;
- int mach_trap_unused;
+ const char *mach_trap_name;
} mach_trap_t;
extern mach_trap_t mach_trap_table[];
extern int mach_trap_count;
#define MACH_TRAP(name, arg_count) \
- { (arg_count), (int (*)()) (name), FALSE, 0 }
+ { (arg_count), (int (*)()) (name), FALSE, #name }
#define MACH_TRAP_STACK(name, arg_count) \
- { (arg_count), (int (*)()) (name), TRUE, 0 }
+ { (arg_count), (int (*)()) (name), TRUE, #name }
#endif /* _KERN_SYSCALL_SW_H_ */
diff --git a/kern/task.c b/kern/task.c
index 114dd319..0f24e44d 100644
--- a/kern/task.c
+++ b/kern/task.c
@@ -37,6 +37,7 @@
#include <mach/vm_param.h>
#include <mach/task_info.h>
#include <mach/task_special_ports.h>
+#include <mach_debug/mach_debug_types.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_types.h>
#include <kern/debug.h>
@@ -45,22 +46,24 @@
#include <kern/slab.h>
#include <kern/kalloc.h>
#include <kern/processor.h>
+#include <kern/printf.h>
#include <kern/sched_prim.h> /* for thread_wakeup */
#include <kern/ipc_tt.h>
+#include <kern/syscall_emulation.h>
+#include <kern/task_notify.user.h>
#include <vm/vm_kern.h> /* for kernel_map, ipc_kernel_map */
#include <machine/machspl.h> /* for splsched */
task_t kernel_task = TASK_NULL;
struct kmem_cache task_cache;
-extern void eml_init(void);
-extern void eml_task_reference(task_t, task_t);
-extern void eml_task_deallocate(task_t);
+/* Where to send notifications about newly created tasks. */
+ipc_port_t new_task_notification = NULL;
void task_init(void)
{
kmem_cache_init(&task_cache, "task", sizeof(struct task), 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
eml_init();
machine_task_module_init ();
@@ -71,6 +74,7 @@ void task_init(void)
* for other initialization. (:-()
*/
(void) task_create(TASK_NULL, FALSE, &kernel_task);
+ (void) task_set_name(kernel_task, "gnumach");
}
kern_return_t task_create(
@@ -78,16 +82,15 @@ kern_return_t task_create(
boolean_t inherit_memory,
task_t *child_task) /* OUT */
{
- register task_t new_task;
- register processor_set_t pset;
+ task_t new_task;
+ processor_set_t pset;
#if FAST_TAS
int i;
#endif
new_task = (task_t) kmem_cache_alloc(&task_cache);
- if (new_task == TASK_NULL) {
- panic("task_create: no memory for task structure");
- }
+ if (new_task == TASK_NULL)
+ return KERN_RESOURCE_SHORTAGE;
/* one ref for just being alive; one for our caller */
new_task->ref_count = 2;
@@ -167,6 +170,21 @@ kern_return_t task_create(
}
#endif /* FAST_TAS */
+ if (parent_task == TASK_NULL)
+ snprintf (new_task->name, sizeof new_task->name, "%p",
+ new_task);
+ else
+ snprintf (new_task->name, sizeof new_task->name, "(%.*s)",
+ sizeof new_task->name - 3, parent_task->name);
+
+ if (new_task_notification != NULL) {
+ task_reference (new_task);
+ task_reference (parent_task);
+ mach_notify_new_task (new_task_notification,
+ convert_task_to_port (new_task),
+ convert_task_to_port (parent_task));
+ }
+
ipc_task_enable(new_task);
*child_task = new_task;
@@ -181,10 +199,10 @@ kern_return_t task_create(
* is never in this task.
*/
void task_deallocate(
- register task_t task)
+ task_t task)
{
- register int c;
- register processor_set_t pset;
+ int c;
+ processor_set_t pset;
if (task == TASK_NULL)
return;
@@ -210,7 +228,7 @@ void task_deallocate(
}
void task_reference(
- register task_t task)
+ task_t task)
{
if (task == TASK_NULL)
return;
@@ -227,11 +245,11 @@ void task_reference(
* (kern/thread.c) about problems with terminating the "current task."
*/
kern_return_t task_terminate(
- register task_t task)
+ task_t task)
{
- register thread_t thread, cur_thread;
- register queue_head_t *list;
- register task_t cur_task;
+ thread_t thread, cur_thread;
+ queue_head_t *list;
+ task_t cur_task;
spl_t s;
if (task == TASK_NULL)
@@ -270,6 +288,7 @@ kern_return_t task_terminate(
thread_terminate(cur_thread);
return KERN_FAILURE;
}
+ task_hold_locked(task);
task->active = FALSE;
queue_remove(list, cur_thread, thread_t, thread_list);
thread_unlock(cur_thread);
@@ -323,6 +342,7 @@ kern_return_t task_terminate(
task_unlock(task);
return KERN_FAILURE;
}
+ task_hold_locked(task);
task->active = FALSE;
task_unlock(task);
}
@@ -333,9 +353,8 @@ kern_return_t task_terminate(
* If this is the current task, the current thread will
* be left running.
*/
- ipc_task_disable(task);
- (void) task_hold(task);
(void) task_dowait(task,TRUE); /* may block */
+ ipc_task_disable(task);
/*
* Terminate each thread in the task.
@@ -358,7 +377,7 @@ kern_return_t task_terminate(
task_unlock(task);
thread_force_terminate(thread);
thread_deallocate(thread);
- thread_block((void (*)()) 0);
+ thread_block(thread_no_continuation);
task_lock(task);
}
task_unlock(task);
@@ -400,20 +419,18 @@ kern_return_t task_terminate(
* Suspend execution of the specified task.
* This is a recursive-style suspension of the task, a count of
* suspends is maintained.
+ *
+ * CONDITIONS: the task is locked and active.
*/
-kern_return_t task_hold(
- register task_t task)
+void task_hold_locked(
+ task_t task)
{
- register queue_head_t *list;
- register thread_t thread, cur_thread;
+ queue_head_t *list;
+ thread_t thread, cur_thread;
- cur_thread = current_thread();
+ assert(task->active);
- task_lock(task);
- if (!task->active) {
- task_unlock(task);
- return KERN_FAILURE;
- }
+ cur_thread = current_thread();
task->suspend_count++;
@@ -427,6 +444,26 @@ kern_return_t task_hold(
if (thread != cur_thread)
thread_hold(thread);
}
+}
+
+/*
+ * task_hold:
+ *
+ * Suspend execution of the specified task.
+ * This is a recursive-style suspension of the task, a count of
+ * suspends is maintained.
+ */
+kern_return_t task_hold(
+ task_t task)
+{
+ task_lock(task);
+ if (!task->active) {
+ task_unlock(task);
+ return KERN_FAILURE;
+ }
+
+ task_hold_locked(task);
+
task_unlock(task);
return KERN_SUCCESS;
}
@@ -441,12 +478,12 @@ kern_return_t task_hold(
* must_wait is true.
*/
kern_return_t task_dowait(
- register task_t task,
+ task_t task,
boolean_t must_wait)
{
- register queue_head_t *list;
- register thread_t thread, cur_thread, prev_thread;
- register kern_return_t ret = KERN_SUCCESS;
+ queue_head_t *list;
+ thread_t thread, cur_thread, prev_thread;
+ kern_return_t ret = KERN_SUCCESS;
/*
* Iterate through all the threads.
@@ -493,10 +530,10 @@ kern_return_t task_dowait(
}
kern_return_t task_release(
- register task_t task)
+ task_t task)
{
- register queue_head_t *list;
- register thread_t thread, next;
+ queue_head_t *list;
+ thread_t thread, next;
task_lock(task);
if (!task->active) {
@@ -624,9 +661,9 @@ kern_return_t task_threads(
}
kern_return_t task_suspend(
- register task_t task)
+ task_t task)
{
- register boolean_t hold;
+ boolean_t hold;
if (task == TASK_NULL)
return KERN_INVALID_ARGUMENT;
@@ -675,9 +712,9 @@ kern_return_t task_suspend(
}
kern_return_t task_resume(
- register task_t task)
+ task_t task)
{
- register boolean_t release;
+ boolean_t release;
if (task == TASK_NULL)
return KERN_INVALID_ARGUMENT;
@@ -717,7 +754,7 @@ kern_return_t task_info(
switch (flavor) {
case TASK_BASIC_INFO:
{
- register task_basic_info_t basic_info;
+ task_basic_info_t basic_info;
/* Allow *task_info_count to be two words smaller than
the usual amount, because creation_time is a new member
@@ -746,7 +783,8 @@ kern_return_t task_info(
= task->total_system_time.seconds;
basic_info->system_time.microseconds
= task->total_system_time.microseconds;
- basic_info->creation_time = task->creation_time;
+ read_time_stamp(&task->creation_time,
+ &basic_info->creation_time);
task_unlock(task);
if (*task_info_count > TASK_BASIC_INFO_COUNT)
@@ -756,7 +794,7 @@ kern_return_t task_info(
case TASK_EVENTS_INFO:
{
- register task_events_info_t event_info;
+ task_events_info_t event_info;
if (*task_info_count < TASK_EVENTS_INFO_COUNT) {
return KERN_INVALID_ARGUMENT;
@@ -764,7 +802,7 @@ kern_return_t task_info(
event_info = (task_events_info_t) task_info_out;
- task_lock(&task);
+ task_lock(task);
event_info->faults = task->faults;
event_info->zero_fills = task->zero_fills;
event_info->reactivations = task->reactivations;
@@ -772,7 +810,7 @@ kern_return_t task_info(
event_info->cow_faults = task->cow_faults;
event_info->messages_sent = task->messages_sent;
event_info->messages_received = task->messages_received;
- task_unlock(&task);
+ task_unlock(task);
*task_info_count = TASK_EVENTS_INFO_COUNT;
break;
@@ -780,8 +818,8 @@ kern_return_t task_info(
case TASK_THREAD_TIMES_INFO:
{
- register task_thread_times_info_t times_info;
- register thread_t thread;
+ task_thread_times_info_t times_info;
+ thread_t thread;
if (*task_info_count < TASK_THREAD_TIMES_INFO_COUNT) {
return KERN_INVALID_ARGUMENT;
@@ -837,9 +875,9 @@ task_assign(
boolean_t assign_threads)
{
kern_return_t ret = KERN_SUCCESS;
- register thread_t thread, prev_thread;
- register queue_head_t *list;
- register processor_set_t pset;
+ thread_t thread, prev_thread;
+ queue_head_t *list;
+ processor_set_t pset;
if (task == TASK_NULL || new_pset == PROCESSOR_SET_NULL) {
return KERN_INVALID_ARGUMENT;
@@ -855,7 +893,7 @@ task_assign(
task->assign_active = TRUE;
assert_wait((event_t)&task->assign_active, TRUE);
task_unlock(task);
- thread_block((void (*)()) 0);
+ thread_block(thread_no_continuation);
task_lock(task);
}
@@ -1026,6 +1064,9 @@ kern_return_t task_get_assignment(
task_t task,
processor_set_t *pset)
{
+ if (task == TASK_NULL)
+ return KERN_INVALID_ARGUMENT;
+
if (!task->active)
return KERN_FAILURE;
@@ -1055,8 +1096,8 @@ task_priority(
task->priority = priority;
if (change_threads) {
- register thread_t thread;
- register queue_head_t *list;
+ thread_t thread;
+ queue_head_t *list;
list = &task->thread_list;
queue_iterate(list, thread, thread_t, thread_list) {
@@ -1071,6 +1112,22 @@ task_priority(
}
/*
+ * task_set_name
+ *
+ * Set the name of task TASK to NAME. This is a debugging aid.
+ * NAME will be used in error messages printed by the kernel.
+ */
+kern_return_t
+task_set_name(
+ task_t task,
+ kernel_debug_name_t name)
+{
+ strncpy(task->name, name, sizeof task->name - 1);
+ task->name[sizeof task->name - 1] = '\0';
+ return KERN_SUCCESS;
+}
+
+/*
* task_collect_scan:
*
* Attempt to free resources owned by tasks.
@@ -1078,7 +1135,7 @@ task_priority(
void task_collect_scan(void)
{
- register task_t task, prev_task;
+ task_t task, prev_task;
processor_set_t pset, prev_pset;
prev_task = TASK_NULL;
@@ -1209,6 +1266,27 @@ task_ras_control(
break;
}
task_unlock(task);
-#endif
+#endif /* FAST_TAS */
return ret;
}
+
+/*
+ * register_new_task_notification
+ *
+ * Register a port to which notifications about newly created
+ * tasks are sent.
+ */
+kern_return_t
+register_new_task_notification(
+ const host_t host,
+ ipc_port_t notification)
+{
+ if (host == HOST_NULL)
+ return KERN_INVALID_HOST;
+
+ if (new_task_notification != NULL)
+ return KERN_NO_ACCESS;
+
+ new_task_notification = notification;
+ return KERN_SUCCESS;
+}
diff --git a/kern/task.h b/kern/task.h
index 9bfea571..2a4c28fc 100644
--- a/kern/task.h
+++ b/kern/task.h
@@ -39,6 +39,7 @@
#include <mach/time_value.h>
#include <mach/mach_param.h>
#include <mach/task_info.h>
+#include <mach_debug/mach_debug_types.h>
#include <kern/kern_types.h>
#include <kern/lock.h>
#include <kern/queue.h>
@@ -48,11 +49,22 @@
#include <vm/vm_types.h>
#include <machine/task.h>
+/*
+ * Task name buffer size. The size is chosen so that struct task fits
+ * into three cache lines. The size of a cache line on a typical CPU
+ * is 64 bytes.
+ */
+#define TASK_NAME_SIZE 32
+
struct task {
/* Synchronization/destruction information */
decl_simple_lock_data(,lock) /* Task's lock */
int ref_count; /* Number of references to me */
- boolean_t active; /* Task has not been terminated */
+
+ /* Flags */
+ unsigned int active:1, /* Task has not been terminated */
+ /* boolean_t */ may_assign:1, /* can assigned pset be changed? */
+ assign_active:1; /* waiting for may_assign */
/* Miscellaneous */
vm_map_t map; /* Address space description */
@@ -63,8 +75,6 @@ struct task {
queue_head_t thread_list; /* list of threads */
int thread_count; /* number of threads */
processor_set_t processor_set; /* processor set for new threads */
- boolean_t may_assign; /* can assigned pset be changed? */
- boolean_t assign_active; /* waiting for may_assign */
/* User-visible scheduling information */
int user_stop_count; /* outstanding stops */
@@ -111,6 +121,8 @@ struct task {
natural_t cow_faults; /* copy-on-write faults counter */
natural_t messages_sent; /* messages sent counter */
natural_t messages_received; /* messages received counter */
+
+ char name[TASK_NAME_SIZE];
};
#define task_lock(task) simple_lock(&(task)->lock)
@@ -158,6 +170,9 @@ extern kern_return_t task_assign(
extern kern_return_t task_assign_default(
task_t task,
boolean_t assign_threads);
+extern kern_return_t task_set_name(
+ task_t task,
+ kernel_debug_name_t name);
extern void consider_task_collect(void);
/*
@@ -167,6 +182,7 @@ extern void consider_task_collect(void);
extern void task_init(void);
extern void task_reference(task_t);
extern void task_deallocate(task_t);
+extern void task_hold_locked(task_t);
extern kern_return_t task_hold(task_t);
extern kern_return_t task_dowait(task_t, boolean_t);
extern kern_return_t task_release(task_t);
diff --git a/kern/task_notify.cli b/kern/task_notify.cli
new file mode 100644
index 00000000..c6c85d99
--- /dev/null
+++ b/kern/task_notify.cli
@@ -0,0 +1,7 @@
+/* XXX */
+
+/* This is a client presentation file. */
+
+#define KERNEL_USER 1
+
+#include <mach/task_notify.defs>
diff --git a/kern/thread.c b/kern/thread.c
index 79f526a2..ce44ed14 100644
--- a/kern/thread.c
+++ b/kern/thread.c
@@ -57,9 +57,11 @@
#include <kern/slab.h>
#include <kern/mach_clock.h>
#include <vm/vm_kern.h>
+#include <vm/vm_user.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_port.h>
#include <ipc/mach_msg.h>
+#include <ipc/mach_port.h>
#include <machine/machspl.h> /* for splsched */
#include <machine/pcb.h>
#include <machine/thread.h> /* for MACHINE_STACK */
@@ -68,19 +70,15 @@ thread_t active_threads[NCPUS];
vm_offset_t active_stacks[NCPUS];
struct kmem_cache thread_cache;
+struct kmem_cache thread_stack_cache;
queue_head_t reaper_queue;
decl_simple_lock_data(, reaper_lock)
-extern void pcb_module_init(void);
-
/* private */
struct thread thread_template;
#if MACH_DEBUG
-void stack_init(vm_offset_t stack); /* forward */
-void stack_finalize(vm_offset_t stack); /* forward */
-
#define STACK_MARKER 0xdeadbeefU
boolean_t stack_check_usage = FALSE;
decl_simple_lock_data(, stack_usage_lock)
@@ -127,10 +125,6 @@ vm_offset_t stack_free_list; /* splsched only */
unsigned int stack_free_count = 0; /* splsched only */
unsigned int stack_free_limit = 1; /* patchable */
-unsigned int stack_alloc_hits = 0; /* debugging */
-unsigned int stack_alloc_misses = 0; /* debugging */
-unsigned int stack_alloc_max = 0; /* debugging */
-
/*
* The next field is at the base of the stack,
* so the low end is left unsullied.
@@ -149,7 +143,7 @@ boolean_t stack_alloc_try(
thread_t thread,
void (*resume)(thread_t))
{
- register vm_offset_t stack;
+ vm_offset_t stack;
stack_lock();
stack = stack_free_list;
@@ -163,10 +157,10 @@ boolean_t stack_alloc_try(
if (stack != 0) {
stack_attach(thread, stack, resume);
- stack_alloc_hits++;
+ counter(c_stack_alloc_hits++);
return TRUE;
} else {
- stack_alloc_misses++;
+ counter(c_stack_alloc_misses++);
return FALSE;
}
}
@@ -178,7 +172,7 @@ boolean_t stack_alloc_try(
* May block.
*/
-void stack_alloc(
+kern_return_t stack_alloc(
thread_t thread,
void (*resume)(thread_t))
{
@@ -202,22 +196,15 @@ void stack_alloc(
(void) splx(s);
if (stack == 0) {
- /*
- * Kernel stacks should be naturally aligned,
- * so that it is easy to find the starting/ending
- * addresses of a stack given an address in the middle.
- */
-
- if (kmem_alloc_aligned(kmem_map, &stack, KERNEL_STACK_SIZE)
- != KERN_SUCCESS)
- panic("stack_alloc");
-
+ stack = kmem_cache_alloc(&thread_stack_cache);
+ assert(stack != 0);
#if MACH_DEBUG
stack_init(stack);
#endif /* MACH_DEBUG */
}
stack_attach(thread, stack, resume);
+ return KERN_SUCCESS;
}
/*
@@ -230,7 +217,7 @@ void stack_alloc(
void stack_free(
thread_t thread)
{
- register vm_offset_t stack;
+ vm_offset_t stack;
stack = stack_detach(thread);
@@ -238,8 +225,11 @@ void stack_free(
stack_lock();
stack_next(stack) = stack_free_list;
stack_free_list = stack;
- if (++stack_free_count > stack_alloc_max)
- stack_alloc_max = stack_free_count;
+ stack_free_count += 1;
+#if MACH_COUNTERS
+ if (stack_free_count > c_stack_alloc_max)
+ c_stack_alloc_max = stack_free_count;
+#endif /* MACH_COUNTERS */
stack_unlock();
}
}
@@ -253,7 +243,7 @@ void stack_free(
void stack_collect(void)
{
- register vm_offset_t stack;
+ vm_offset_t stack;
spl_t s;
s = splsched();
@@ -268,7 +258,7 @@ void stack_collect(void)
#if MACH_DEBUG
stack_finalize(stack);
#endif /* MACH_DEBUG */
- kmem_free(kmem_map, stack, KERNEL_STACK_SIZE);
+ kmem_cache_free(&thread_stack_cache, stack);
s = splsched();
stack_lock();
@@ -285,7 +275,7 @@ void stack_collect(void)
*/
void stack_privilege(
- register thread_t thread)
+ thread_t thread)
{
/*
* This implementation only works for the current thread.
@@ -301,7 +291,15 @@ void stack_privilege(
void thread_init(void)
{
kmem_cache_init(&thread_cache, "thread", sizeof(struct thread), 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
+ /*
+ * Kernel stacks should be naturally aligned,
+ * so that it is easy to find the starting/ending
+ * addresses of a stack given an address in the middle.
+ */
+ kmem_cache_init(&thread_stack_cache, "thread_stack",
+ KERNEL_STACK_SIZE, KERNEL_STACK_SIZE,
+ NULL, 0);
/*
* Fill in a template thread for fast initialization.
@@ -398,11 +396,11 @@ void thread_init(void)
}
kern_return_t thread_create(
- register task_t parent_task,
+ task_t parent_task,
thread_t *child_thread) /* OUT */
{
- register thread_t new_thread;
- register processor_set_t pset;
+ thread_t new_thread;
+ processor_set_t pset;
if (parent_task == TASK_NULL)
return KERN_INVALID_ARGUMENT;
@@ -433,7 +431,7 @@ kern_return_t thread_create(
* Create a pcb. The kernel stack is created later,
* when the thread is swapped-in.
*/
- pcb_init(new_thread);
+ pcb_init(parent_task, new_thread);
ipc_thread_init(new_thread);
@@ -575,11 +573,11 @@ kern_return_t thread_create(
unsigned int thread_deallocate_stack = 0;
void thread_deallocate(
- register thread_t thread)
+ thread_t thread)
{
spl_t s;
- register task_t task;
- register processor_set_t pset;
+ task_t task;
+ processor_set_t pset;
time_value_t user_time, system_time;
@@ -710,7 +708,7 @@ void thread_deallocate(
}
void thread_reference(
- register thread_t thread)
+ thread_t thread)
{
spl_t s;
@@ -744,10 +742,10 @@ void thread_reference(
* since it needs a kernel stack to execute.)
*/
kern_return_t thread_terminate(
- register thread_t thread)
+ thread_t thread)
{
- register thread_t cur_thread = current_thread();
- register task_t cur_task;
+ thread_t cur_thread = current_thread();
+ task_t cur_task;
spl_t s;
if (thread == THREAD_NULL)
@@ -850,6 +848,28 @@ kern_return_t thread_terminate(
return KERN_SUCCESS;
}
+kern_return_t thread_terminate_release(
+ thread_t thread,
+ task_t task,
+ mach_port_t thread_name,
+ mach_port_t reply_port,
+ vm_offset_t address,
+ vm_size_t size)
+{
+ if (task == NULL)
+ return KERN_INVALID_ARGUMENT;
+
+ mach_port_deallocate(task->itk_space, thread_name);
+
+ if (reply_port != MACH_PORT_NULL)
+ mach_port_destroy(task->itk_space, reply_port);
+
+ if ((address != 0) || (size != 0))
+ vm_deallocate(task->map, address, size);
+
+ return thread_terminate(thread);
+}
+
/*
* thread_force_terminate:
*
@@ -859,7 +879,7 @@ kern_return_t thread_terminate(
*/
void
thread_force_terminate(
- register thread_t thread)
+ thread_t thread)
{
boolean_t deallocate_here;
spl_t s;
@@ -901,11 +921,11 @@ thread_force_terminate(
*
*/
kern_return_t thread_halt(
- register thread_t thread,
+ thread_t thread,
boolean_t must_halt)
{
- register thread_t cur_thread = current_thread();
- register kern_return_t ret;
+ thread_t cur_thread = current_thread();
+ kern_return_t ret;
spl_t s;
if (thread == cur_thread)
@@ -949,7 +969,7 @@ kern_return_t thread_halt(
* operation can never cause a deadlock.)
*/
if (cur_thread->ast & AST_HALT) {
- thread_wakeup_with_result((event_t)&cur_thread->wake_active,
+ thread_wakeup_with_result(TH_EV_WAKE_ACTIVE(cur_thread),
THREAD_INTERRUPTED);
thread_unlock(thread);
thread_unlock(cur_thread);
@@ -987,7 +1007,7 @@ kern_return_t thread_halt(
*/
while ((thread->ast & AST_HALT) && (!(thread->state & TH_HALTED))) {
thread->wake_active = TRUE;
- thread_sleep((event_t) &thread->wake_active,
+ thread_sleep(TH_EV_WAKE_ACTIVE(thread),
simple_lock_addr(thread->lock), TRUE);
if (thread->state & TH_HALTED) {
@@ -1026,7 +1046,7 @@ kern_return_t thread_halt(
s = splsched();
thread_lock(thread);
thread_ast_clear(thread, AST_HALT);
- thread_wakeup_with_result((event_t)&thread->wake_active,
+ thread_wakeup_with_result(TH_EV_WAKE_ACTIVE(thread),
THREAD_INTERRUPTED);
thread_unlock(thread);
(void) splx(s);
@@ -1104,7 +1124,7 @@ kern_return_t thread_halt(
}
}
-void walking_zombie(void)
+void __attribute__((noreturn)) walking_zombie(void)
{
panic("the zombie walks!");
}
@@ -1113,9 +1133,9 @@ void walking_zombie(void)
* Thread calls this routine on exit from the kernel when it
* notices a halt request.
*/
-void thread_halt_self(void)
+void thread_halt_self(continuation_t continuation)
{
- register thread_t thread = current_thread();
+ thread_t thread = current_thread();
spl_t s;
if (thread->ast & AST_TERMINATE) {
@@ -1130,7 +1150,7 @@ void thread_halt_self(void)
s = splsched();
simple_lock(&reaper_lock);
- enqueue_tail(&reaper_queue, (queue_entry_t) thread);
+ enqueue_tail(&reaper_queue, &(thread->links));
simple_unlock(&reaper_lock);
thread_lock(thread);
@@ -1154,7 +1174,7 @@ void thread_halt_self(void)
thread_unlock(thread);
splx(s);
counter(c_thread_halt_self_block++);
- thread_block(thread_exception_return);
+ thread_block(continuation);
/*
* thread_release resets TH_HALTED.
*/
@@ -1169,7 +1189,7 @@ void thread_halt_self(void)
* suspends is maintained.
*/
void thread_hold(
- register thread_t thread)
+ thread_t thread)
{
spl_t s;
@@ -1191,11 +1211,11 @@ void thread_hold(
*/
kern_return_t
thread_dowait(
- register thread_t thread,
+ thread_t thread,
boolean_t must_halt)
{
- register boolean_t need_wakeup;
- register kern_return_t ret = KERN_SUCCESS;
+ boolean_t need_wakeup;
+ kern_return_t ret = KERN_SUCCESS;
spl_t s;
if (thread == current_thread())
@@ -1265,7 +1285,7 @@ thread_dowait(
* Check for failure if interrupted.
*/
thread->wake_active = TRUE;
- thread_sleep((event_t) &thread->wake_active,
+ thread_sleep(TH_EV_WAKE_ACTIVE(thread),
simple_lock_addr(thread->lock), TRUE);
thread_lock(thread);
if ((current_thread()->wait_result != THREAD_AWAKENED) &&
@@ -1289,13 +1309,13 @@ thread_dowait(
(void) splx(s);
if (need_wakeup)
- thread_wakeup((event_t) &thread->wake_active);
+ thread_wakeup(TH_EV_WAKE_ACTIVE(thread));
return ret;
}
void thread_release(
- register thread_t thread)
+ thread_t thread)
{
spl_t s;
@@ -1314,9 +1334,9 @@ void thread_release(
}
kern_return_t thread_suspend(
- register thread_t thread)
+ thread_t thread)
{
- register boolean_t hold;
+ boolean_t hold;
spl_t spl;
if (thread == THREAD_NULL)
@@ -1327,9 +1347,9 @@ kern_return_t thread_suspend(
thread_lock(thread);
/* Wait for thread to get interruptible */
while (thread->state & TH_UNINT) {
- assert_wait(&thread->state, TRUE);
+ assert_wait(TH_EV_STATE(thread), TRUE);
thread_unlock(thread);
- thread_block(NULL);
+ thread_block(thread_no_continuation);
thread_lock(thread);
}
if (thread->user_stop_count++ == 0) {
@@ -1360,9 +1380,9 @@ kern_return_t thread_suspend(
kern_return_t thread_resume(
- register thread_t thread)
+ thread_t thread)
{
- register kern_return_t ret;
+ kern_return_t ret;
spl_t s;
if (thread == THREAD_NULL)
@@ -1398,7 +1418,7 @@ kern_return_t thread_resume(
* Return thread's machine-dependent state.
*/
kern_return_t thread_get_state(
- register thread_t thread,
+ thread_t thread,
int flavor,
thread_state_t old_state, /* pointer to OUT array */
natural_t *old_state_count) /*IN/OUT*/
@@ -1422,7 +1442,7 @@ kern_return_t thread_get_state(
* Change thread's machine-dependent state.
*/
kern_return_t thread_set_state(
- register thread_t thread,
+ thread_t thread,
int flavor,
thread_state_t new_state,
natural_t new_state_count)
@@ -1443,7 +1463,7 @@ kern_return_t thread_set_state(
}
kern_return_t thread_info(
- register thread_t thread,
+ thread_t thread,
int flavor,
thread_info_t thread_info_out, /* pointer to OUT array */
natural_t *thread_info_count) /*IN/OUT*/
@@ -1455,7 +1475,7 @@ kern_return_t thread_info(
return KERN_INVALID_ARGUMENT;
if (flavor == THREAD_BASIC_INFO) {
- register thread_basic_info_t basic_info;
+ thread_basic_info_t basic_info;
/* Allow *thread_info_count to be one smaller than the
usual amount, because creation_time is a new member
@@ -1484,7 +1504,8 @@ kern_return_t thread_info(
&basic_info->system_time);
basic_info->base_priority = thread->priority;
basic_info->cur_priority = thread->sched_pri;
- basic_info->creation_time = thread->creation_time;
+ read_time_stamp(&thread->creation_time,
+ &basic_info->creation_time);
/*
* To calculate cpu_usage, first correct for timer rate,
@@ -1541,7 +1562,7 @@ kern_return_t thread_info(
return KERN_SUCCESS;
}
else if (flavor == THREAD_SCHED_INFO) {
- register thread_sched_info_t sched_info;
+ thread_sched_info_t sched_info;
if (*thread_info_count < THREAD_SCHED_INFO_COUNT) {
return KERN_INVALID_ARGUMENT;
@@ -1583,7 +1604,7 @@ kern_return_t thread_info(
}
kern_return_t thread_abort(
- register thread_t thread)
+ thread_t thread)
{
if (thread == THREAD_NULL || thread == current_thread()) {
return KERN_INVALID_ARGUMENT;
@@ -1648,9 +1669,13 @@ thread_t kernel_thread(
continuation_t start,
void * arg)
{
+ kern_return_t kr;
thread_t thread;
- (void) thread_create(task, &thread);
+ kr = thread_create(task, &thread);
+ if (kr != KERN_SUCCESS)
+ return THREAD_NULL;
+
/* release "extra" ref that thread_create gave us */
thread_deallocate(thread);
thread_start(thread, start);
@@ -1674,10 +1699,10 @@ thread_t kernel_thread(
* This kernel thread runs forever looking for threads to destroy
* (when they request that they be destroyed, of course).
*/
-void reaper_thread_continue(void)
+void __attribute__((noreturn)) reaper_thread_continue(void)
{
for (;;) {
- register thread_t thread;
+ thread_t thread;
spl_t s;
s = splsched();
@@ -1792,12 +1817,12 @@ thread_unfreeze(
void
thread_doassign(
- register thread_t thread,
- register processor_set_t new_pset,
+ thread_t thread,
+ processor_set_t new_pset,
boolean_t release_freeze)
{
- register processor_set_t pset;
- register boolean_t old_empty, new_empty;
+ processor_set_t pset;
+ boolean_t old_empty, new_empty;
boolean_t recompute_pri = FALSE;
spl_t s;
@@ -1955,6 +1980,9 @@ kern_return_t thread_get_assignment(
thread_t thread,
processor_set_t *pset)
{
+ if (thread == THREAD_NULL)
+ return KERN_INVALID_ARGUMENT;
+
*pset = thread->processor_set;
pset_reference(*pset);
return KERN_SUCCESS;
@@ -2100,8 +2128,8 @@ thread_policy(
int data)
{
#if MACH_FIXPRI
- register kern_return_t ret = KERN_SUCCESS;
- register int temp;
+ kern_return_t ret = KERN_SUCCESS;
+ int temp;
spl_t s;
#endif /* MACH_FIXPRI */
@@ -2217,7 +2245,6 @@ thread_wire(
void thread_collect_scan(void)
{
-#if 0
register thread_t thread, prev_thread;
processor_set_t pset, prev_pset;
@@ -2270,7 +2297,6 @@ void thread_collect_scan(void)
thread_deallocate(prev_thread);
if (prev_pset != PROCESSOR_SET_NULL)
pset_deallocate(prev_pset);
-#endif /* 0 */
}
boolean_t thread_collect_allowed = TRUE;
@@ -2304,7 +2330,7 @@ void consider_thread_collect(void)
#if MACH_DEBUG
vm_size_t stack_usage(
- register vm_offset_t stack)
+ vm_offset_t stack)
{
int i;
@@ -2321,7 +2347,7 @@ vm_size_t stack_usage(
*/
void stack_init(
- register vm_offset_t stack)
+ vm_offset_t stack)
{
if (stack_check_usage) {
int i;
@@ -2337,7 +2363,7 @@ void stack_init(
*/
void stack_finalize(
- register vm_offset_t stack)
+ vm_offset_t stack)
{
if (stack_check_usage) {
vm_size_t used = stack_usage(stack);
@@ -2430,8 +2456,8 @@ kern_return_t processor_set_stack_usage(
vm_size_t maxusage;
vm_offset_t maxstack;
- register thread_t *threads;
- register thread_t tmp_thread;
+ thread_t *threads;
+ thread_t tmp_thread;
unsigned int actual; /* this many things */
unsigned int i;
@@ -2549,7 +2575,7 @@ kern_return_t processor_set_stack_usage(
void
thread_stats(void)
{
- register thread_t thread;
+ thread_t thread;
int total = 0, rpcreply = 0;
queue_iterate(&default_pset.threads, thread, thread_t, pset_threads) {
diff --git a/kern/thread.h b/kern/thread.h
index 3959dfce..7106fd2d 100644
--- a/kern/thread.h
+++ b/kern/thread.h
@@ -70,6 +70,22 @@ struct thread {
task_t task; /* Task to which I belong */
queue_chain_t thread_list; /* list of threads in task */
+ /* Flags */
+ /* The flags are grouped here, but documented at the original
+ position. */
+ union {
+ struct {
+ unsigned state:16;
+ unsigned wake_active:1;
+ unsigned vm_privilege:1;
+ unsigned active:1;
+ };
+ event_t event_key;
+/* These keys can be used with thread_wakeup and friends. */
+#define TH_EV_WAKE_ACTIVE(t) ((event_t) (&(t)->event_key + 0))
+#define TH_EV_STATE(t) ((event_t) (&(t)->event_key + 1))
+ };
+
/* Thread bookkeeping */
queue_chain_t pset_threads; /* list of all threads in proc set*/
@@ -84,7 +100,7 @@ struct thread {
vm_offset_t stack_privilege;/* reserved kernel stack */
/* Swapping information */
- void (*swap_func)(); /* start here after swapin */
+ continuation_t swap_func; /* start here after swapin */
/* Blocking information */
event_t wait_event; /* event we are waiting on */
@@ -92,9 +108,10 @@ struct thread {
kern_return_t wait_result; /* outcome of wait -
may be examined by this thread
WITHOUT locking */
- boolean_t wake_active; /* someone is waiting for this
+ /* Defined above */
+ /* boolean_t wake_active; someone is waiting for this
thread to become suspended */
- int state; /* Thread state: */
+ /* int state; Thread state: */
/*
* Thread states [bits or'ed]
*/
@@ -129,7 +146,8 @@ struct thread {
/* VM global variables */
vm_offset_t recover; /* page fault recovery (copyin/out) */
- boolean_t vm_privilege; /* Can use reserved memory? */
+ /* Defined above */
+ /* boolean_t vm_privilege; Can use reserved memory? */
/* User-visible scheduling state */
int user_stop_count; /* outstanding stops */
@@ -194,7 +212,8 @@ struct thread {
timer_elt_data_t depress_timer; /* timer for priority depression */
/* Ast/Halt data structures */
- boolean_t active; /* how alive is the thread */
+ /* Defined above */
+ /* boolean_t active; how alive is the thread */
int ast; /* ast's needed. See ast.h */
/* Processor data structures */
@@ -259,6 +278,13 @@ extern kern_return_t thread_create(
thread_t *child_thread);
extern kern_return_t thread_terminate(
thread_t thread);
+extern kern_return_t thread_terminate_release(
+ thread_t thread,
+ task_t task,
+ mach_port_t thread_name,
+ mach_port_t reply_port,
+ vm_offset_t address,
+ vm_size_t size);
extern kern_return_t thread_suspend(
thread_t thread);
extern kern_return_t thread_resume(
@@ -336,16 +362,14 @@ extern void thread_release(thread_t);
extern kern_return_t thread_halt(
thread_t thread,
boolean_t must_halt);
-extern void thread_halt_self(void);
+extern void thread_halt_self(continuation_t);
extern void thread_force_terminate(thread_t);
-extern void thread_set_own_priority(
- int priority);
extern thread_t kernel_thread(
task_t task,
void (*start)(void),
void * arg);
-extern void reaper_thread(void);
+extern void reaper_thread(void) __attribute__((noreturn));
#if MACH_HOST
extern void thread_freeze(
@@ -384,4 +408,9 @@ extern void thread_unfreeze(
#define current_space() (current_task()->itk_space)
#define current_map() (current_task()->map)
+#if MACH_DEBUG
+void stack_init(vm_offset_t stack);
+void stack_finalize(vm_offset_t stack);
+#endif /* MACH_DEBUG */
+
#endif /* _KERN_THREAD_H_ */
diff --git a/kern/thread_swap.c b/kern/thread_swap.c
index f29bd5b7..20ad0409 100644
--- a/kern/thread_swap.c
+++ b/kern/thread_swap.c
@@ -86,8 +86,7 @@ void swapper_init(void)
* our callers have already tried that route.
*/
-void thread_swapin(thread)
- thread_t thread;
+void thread_swapin(thread_t thread)
{
switch (thread->state & TH_SWAP_STATE) {
case TH_SWAPPED:
@@ -97,7 +96,7 @@ void thread_swapin(thread)
thread->state = (thread->state & ~TH_SWAP_STATE)
| TH_SW_COMING_IN;
swapper_lock();
- enqueue_tail(&swapin_queue, (queue_entry_t) thread);
+ enqueue_tail(&swapin_queue, &(thread->links));
swapper_unlock();
thread_wakeup((event_t) &swapin_queue);
break;
@@ -124,16 +123,18 @@ void thread_swapin(thread)
* it on a run queue. No locks should be held on entry, as it is
* likely that this routine will sleep (waiting for stack allocation).
*/
-void thread_doswapin(thread)
- register thread_t thread;
+kern_return_t thread_doswapin(thread_t thread)
{
+ kern_return_t kr;
spl_t s;
/*
* Allocate the kernel stack.
*/
- stack_alloc(thread, thread_continue);
+ kr = stack_alloc(thread, thread_continue);
+ if (kr != KERN_SUCCESS)
+ return kr;
/*
* Place on run queue.
@@ -146,6 +147,7 @@ void thread_doswapin(thread)
thread_setrun(thread, TRUE);
thread_unlock(thread);
(void) splx(s);
+ return KERN_SUCCESS;
}
/*
@@ -154,10 +156,10 @@ void thread_doswapin(thread)
* This procedure executes as a kernel thread. Threads that need to
* be swapped in are swapped in by this thread.
*/
-void swapin_thread_continue(void)
+void __attribute__((noreturn)) swapin_thread_continue(void)
{
for (;;) {
- register thread_t thread;
+ thread_t thread;
spl_t s;
s = splsched();
@@ -165,13 +167,20 @@ void swapin_thread_continue(void)
while ((thread = (thread_t) dequeue_head(&swapin_queue))
!= THREAD_NULL) {
+ kern_return_t kr;
swapper_unlock();
(void) splx(s);
- thread_doswapin(thread); /* may block */
+ kr = thread_doswapin(thread); /* may block */
s = splsched();
swapper_lock();
+
+ if (kr != KERN_SUCCESS) {
+ enqueue_head(&swapin_queue,
+ (queue_entry_t) thread);
+ break;
+ }
}
assert_wait((event_t) &swapin_queue, FALSE);
diff --git a/kern/thread_swap.h b/kern/thread_swap.h
index 31130301..d032accf 100644
--- a/kern/thread_swap.h
+++ b/kern/thread_swap.h
@@ -37,8 +37,7 @@
*/
extern void swapper_init(void);
extern void thread_swapin(thread_t thread);
-extern void thread_doswapin(thread_t thread);
-extern void swapin_thread(void);
-extern void thread_swapout(thread_t thread);
+extern kern_return_t thread_doswapin(thread_t thread);
+extern void swapin_thread(void) __attribute__((noreturn));
#endif /* _KERN_THREAD_SWAP_H_ */
diff --git a/kern/time_stamp.c b/kern/time_stamp.c
index 22885b18..ee141a0e 100644
--- a/kern/time_stamp.c
+++ b/kern/time_stamp.c
@@ -32,31 +32,18 @@
/*
* ts.c - kern_timestamp system call.
*/
-#ifdef multimax
-#include <mmax/timer.h>
-#endif /* multimax */
-
-
-
kern_return_t
-kern_timestamp(tsp)
-struct tsval *tsp;
+kern_timestamp(struct tsval *tsp)
{
-#ifdef multimax
- struct tsval temp;
- temp.low_val = FRcounter;
- temp.high_val = 0;
-#else /* multimax */
/*
temp.low_val = 0;
temp.high_val = ts_tick_count;
*/
time_value_t temp;
temp = time;
-#endif /* multimax */
- if (copyout((char *)&temp,
- (char *)tsp,
+ if (copyout(&temp,
+ tsp,
sizeof(struct tsval)) != KERN_SUCCESS)
return(KERN_INVALID_ADDRESS);
return(KERN_SUCCESS);
@@ -66,10 +53,7 @@ struct tsval *tsp;
* Initialization procedure.
*/
-void timestamp_init()
+void timestamp_init(void)
{
-#ifdef multimax
-#else /* multimax */
ts_tick_count = 0;
-#endif /* multimax */
}
diff --git a/kern/timer.c b/kern/timer.c
index ec0524a8..79ada27e 100644
--- a/kern/timer.c
+++ b/kern/timer.c
@@ -33,24 +33,22 @@
#include <kern/cpu_number.h>
#include <kern/assert.h>
-#include <kern/macro_help.h>
+#include <kern/macros.h>
timer_t current_timer[NCPUS];
timer_data_t kernel_timer[NCPUS];
-void timer_init(); /* forward */
-
/*
* init_timers initializes all non-thread timers and puts the
* service routine on the callout queue. All timers must be
* serviced by the callout routine once an hour.
*/
-void init_timers()
+void init_timers(void)
{
- register int i;
- register timer_t this_timer;
+ int i;
+ timer_t this_timer;
/*
* Initialize all the kernel timers and start the one
@@ -68,9 +66,7 @@ void init_timers()
/*
* timer_init initializes a single timer.
*/
-void timer_init(this_timer)
-register
-timer_t this_timer;
+void timer_init(timer_t this_timer)
{
this_timer->low_bits = 0;
this_timer->high_bits = 0;
@@ -94,8 +90,7 @@ timer_t this_timer;
* exactly once for each cpu during the boot sequence.
*/
void
-start_timer(timer)
-timer_t timer;
+start_timer(timer_t timer)
{
timer->tstamp = get_timestamp();
current_timer[cpu_number()] = timer;
@@ -108,8 +103,7 @@ timer_t timer;
* from user mode.
*/
void
-time_trap_uentry(ts)
-unsigned ts;
+time_trap_uentry(unsigned ts)
{
int elapsed;
int mycpu;
@@ -150,7 +144,7 @@ unsigned ts;
* user mode.
*/
void
-time_trap_uexit(ts)
+time_trap_uexit(int ts)
{
int elapsed;
int mycpu;
@@ -194,9 +188,9 @@ time_trap_uexit(ts)
* saved for time_int_exit.
*/
timer_t
-time_int_entry(ts,new_timer)
-unsigned ts;
-timer_t new_timer;
+time_int_entry(
+ unsigned ts,
+ timer_t new_timer)
{
int elapsed;
int mycpu;
@@ -235,9 +229,9 @@ timer_t new_timer;
* it.
*/
void
-time_int_exit(ts, old_timer)
-unsigned ts;
-timer_t old_timer;
+time_int_exit(
+ unsigned ts,
+ timer_t old_timer)
{
int elapsed;
int mycpu;
@@ -282,8 +276,7 @@ timer_t old_timer;
* Caller must lock out interrupts.
*/
void
-timer_switch(new_timer)
-timer_t new_timer;
+timer_switch(timer_t new_timer)
{
int elapsed;
int mycpu;
@@ -328,9 +321,7 @@ timer_t new_timer;
* timer_normalize normalizes the value of a timer. It is
* called only rarely, to make sure low_bits never overflows.
*/
-void timer_normalize(timer)
-register
-timer_t timer;
+void timer_normalize(timer_t timer)
{
unsigned int high_increment;
@@ -356,9 +347,9 @@ timer_t timer;
* Keep coherent with db_time_grab below.
*/
-static void timer_grab(timer, save)
-timer_t timer;
-timer_save_t save;
+static void timer_grab(
+ timer_t timer,
+ timer_save_t save)
{
#if MACH_ASSERT
unsigned int passes=0;
@@ -390,9 +381,9 @@ timer_save_t save;
* above.
*
*/
-void db_timer_grab(timer, save)
-timer_t timer;
-timer_save_t save;
+void db_timer_grab(
+ timer_t timer,
+ timer_save_t save)
{
/* Don't worry about coherency */
@@ -409,10 +400,9 @@ timer_save_t save;
*/
void
-timer_read(timer, tv)
-timer_t timer;
-register
-time_value_t *tv;
+timer_read(
+ timer_t timer,
+ time_value_t *tv)
{
timer_save_data_t temp;
@@ -436,13 +426,13 @@ time_value_t *tv;
*
* Needs to be kept coherent with thread_read_times ahead.
*/
-void thread_read_times(thread, user_time_p, system_time_p)
- thread_t thread;
- time_value_t *user_time_p;
- time_value_t *system_time_p;
+void thread_read_times(
+ thread_t thread,
+ time_value_t *user_time_p,
+ time_value_t *system_time_p)
{
timer_save_data_t temp;
- register timer_t timer;
+ timer_t timer;
timer = &thread->user_timer;
timer_grab(timer, &temp);
@@ -470,13 +460,13 @@ void thread_read_times(thread, user_time_p, system_time_p)
* thread_read_times above.
*
*/
-void db_thread_read_times(thread, user_time_p, system_time_p)
- thread_t thread;
- time_value_t *user_time_p;
- time_value_t *system_time_p;
+void db_thread_read_times(
+ thread_t thread,
+ time_value_t *user_time_p,
+ time_value_t *system_time_p)
{
timer_save_data_t temp;
- register timer_t timer;
+ timer_t timer;
timer = &thread->user_timer;
db_timer_grab(timer, &temp);
@@ -505,13 +495,12 @@ void db_thread_read_times(thread, user_time_p, system_time_p)
*/
unsigned
-timer_delta(timer, save)
-register
-timer_t timer;
-timer_save_t save;
+timer_delta(
+ timer_t timer,
+ timer_save_t save)
{
timer_save_data_t new_save;
- register unsigned result;
+ unsigned result;
timer_grab(timer,&new_save);
result = (new_save.high - save->high) * TIMER_HIGH_UNIT +
diff --git a/kern/timer.h b/kern/timer.h
index 817fa356..2f473cf8 100644
--- a/kern/timer.h
+++ b/kern/timer.h
@@ -27,7 +27,7 @@
#ifndef _KERN_TIMER_H_
#define _KERN_TIMER_H_
-#include <kern/macro_help.h>
+#include <kern/macros.h>
#if STAT_TIME
/*
@@ -168,7 +168,7 @@ extern void time_int_exit(unsigned, timer_t);
#define TIMER_DELTA(timer, save, result) \
MACRO_BEGIN \
- register unsigned temp; \
+ unsigned temp; \
\
temp = (timer).low_bits; \
if ((save).high != (timer).high_bits_check) { \
@@ -182,4 +182,6 @@ MACRO_END
extern void init_timers(void);
+void timer_init(timer_t this_timer);
+
#endif /* _KERN_TIMER_H_ */
diff --git a/kern/xpr.c b/kern/xpr.c
index 465ba7bb..46cb2273 100644
--- a/kern/xpr.c
+++ b/kern/xpr.c
@@ -56,12 +56,16 @@ struct xprbuf *xprptr; /* Currently allocated xprbuf */
struct xprbuf *xprlast; /* Pointer to end of circular buffer */
/*VARARGS1*/
-void xpr(msg, arg1, arg2, arg3, arg4, arg5)
-char *msg;
-int arg1, arg2, arg3, arg4, arg5;
+void xpr(
+ char *msg,
+ int arg1,
+ int arg2,
+ int arg3,
+ int arg4,
+ int arg5)
{
- register spl_t s;
- register struct xprbuf *x;
+ spl_t s;
+ struct xprbuf *x;
/* If we aren't initialized, ignore trace request */
if (!xprenable || (xprptr == 0))
@@ -115,7 +119,7 @@ void xprbootstrap(void)
* the previous buffer contents.
*/
- memset((char *) addr, 0, size);
+ memset((void *) addr, 0, size);
}
xprbase = (struct xprbuf *) addr;
@@ -132,9 +136,8 @@ void xprinit(void)
#if MACH_KDB
#include <machine/setjmp.h>
+#include <ddb/db_output.h>
-
-extern void db_printf();
extern jmp_buf_t *db_recover;
/*
@@ -145,16 +148,16 @@ extern jmp_buf_t *db_recover;
* Called with arguments, it can dump xpr buffers in user tasks,
* assuming they use the same format as the kernel.
*/
-void xpr_dump(base, nbufs)
- struct xprbuf *base;
- int nbufs;
+void xpr_dump(
+ struct xprbuf *base,
+ int nbufs)
{
jmp_buf_t db_jmpbuf;
jmp_buf_t *prev;
struct xprbuf *last, *ptr;
- register struct xprbuf *x;
+ struct xprbuf *x;
int i;
- spl_t s;
+ spl_t s = s;
if (base == 0) {
base = xprbase;
diff --git a/kern/xpr.h b/kern/xpr.h
index 4a06216a..72f68170 100644
--- a/kern/xpr.h
+++ b/kern/xpr.h
@@ -34,7 +34,7 @@
* which will expand into the following code:
* if (xprflags & XPR_SYSCALLS)
* xpr("syscall: %d, 0x%x\n", syscallno, arg1);
- * Xpr will log the pointer to the printf string and up to 6 arguements,
+ * Xpr will log the pointer to the printf string and up to 6 arguments,
* along with a timestamp and cpuinfo (for multi-processor systems), into
* a circular buffer. The actual printf processing is delayed until after
* the buffer has been collected. It is assumed that the text/data segments
diff --git a/linux/Makefrag.am b/linux/Makefrag.am
index 0973f11c..1b690108 100644
--- a/linux/Makefrag.am
+++ b/linux/Makefrag.am
@@ -36,6 +36,11 @@ liblinux_a_CPPFLAGS = $(AM_CPPFLAGS) \
# Because of the use of `extern inline' in some Linux header files without
# corresponding text segment definitions, we must always optimize.
liblinux_a_CFLAGS = -O2 $(AM_CFLAGS)
+
+# See <http://lists.gnu.org/archive/html/bug-hurd/2006-01/msg00148.html>.
+liblinux_a_CFLAGS += \
+ -fno-strict-aliasing
+
# TODO. Do we really need `-traditional'?
liblinux_a_CCASFLAGS = $(AM_CCASFLAGS) \
-D__ASSEMBLY__ -traditional \
diff --git a/linux/dev/arch/i386/kernel/irq.c b/linux/dev/arch/i386/kernel/irq.c
index 68bf0c4b..7753814b 100644
--- a/linux/dev/arch/i386/kernel/irq.c
+++ b/linux/dev/arch/i386/kernel/irq.c
@@ -695,7 +695,7 @@ init_IRQ (void)
* Program counter 0 of 8253 to interrupt hz times per second.
*/
outb_p (PIT_C0 | PIT_SQUAREMODE | PIT_READMODE, PITCTL_PORT);
- outb_p (latch && 0xff, PITCTR0_PORT);
+ outb_p (latch & 0xff, PITCTR0_PORT);
outb (latch >> 8, PITCTR0_PORT);
/*
diff --git a/linux/dev/drivers/block/ahci.c b/linux/dev/drivers/block/ahci.c
index 2c573acb..b60f1a19 100644
--- a/linux/dev/drivers/block/ahci.c
+++ b/linux/dev/drivers/block/ahci.c
@@ -36,8 +36,8 @@
/* minor: 2 bits for device number, 6 bits for partition number. */
-#define MAX_PORTS 4
-#define PARTN_BITS 6
+#define MAX_PORTS 8
+#define PARTN_BITS 5
#define PARTN_MASK ((1<<PARTN_BITS)-1)
/* We need to use one DMA scatter element per physical page.
@@ -239,6 +239,8 @@ static struct port {
struct ahci_fis *fis;
struct ahci_cmd_tbl *prdtl;
+ struct hd_driveid id;
+ unsigned is_cd;
unsigned long long capacity; /* Nr of sectors */
u32 status; /* interrupt status */
unsigned cls; /* Command list maximum size.
@@ -264,9 +266,9 @@ static void ahci_end_request(int uptodate)
rq->errors = 0;
if (!uptodate) {
- printk("end_request: I/O error, dev %s, sector %lu\n",
- kdevname(rq->rq_dev), rq->sector);
- assert(0);
+ if (!rq->quiet)
+ printk("end_request: I/O error, dev %s, sector %lu\n",
+ kdevname(rq->rq_dev), rq->sector);
}
for (bh = rq->bh; bh; )
@@ -286,7 +288,7 @@ static void ahci_end_request(int uptodate)
}
/* Push the request to the controller port */
-static void ahci_do_port_request(struct port *port, unsigned sector, struct request *rq)
+static void ahci_do_port_request(struct port *port, unsigned long long sector, struct request *rq)
{
struct ahci_command *command = port->command;
struct ahci_cmd_tbl *prdtl = port->prdtl;
@@ -321,8 +323,8 @@ static void ahci_do_port_request(struct port *port, unsigned sector, struct requ
fis_h2d->lba2 = sector >> 16;
fis_h2d->lba3 = sector >> 24;
- fis_h2d->lba4 = 0;
- fis_h2d->lba5 = 0;
+ fis_h2d->lba4 = sector >> 32;
+ fis_h2d->lba5 = sector >> 40;
fis_h2d->countl = rq->nr_sectors;
fis_h2d->counth = rq->nr_sectors >> 8;
@@ -360,7 +362,7 @@ static void ahci_do_request() /* invoked with cli() */
{
struct request *rq;
unsigned minor, unit;
- unsigned long block, blockend;
+ unsigned long long block, blockend;
struct port *port;
rq = CURRENT;
@@ -379,7 +381,7 @@ static void ahci_do_request() /* invoked with cli() */
minor = MINOR(rq->rq_dev);
unit = minor >> PARTN_BITS;
- if (unit > MAX_PORTS) {
+ if (unit >= MAX_PORTS) {
printk("bad ahci unit %u\n", unit);
goto kill_rq;
}
@@ -393,12 +395,16 @@ static void ahci_do_request() /* invoked with cli() */
/* And check end */
blockend = block + rq->nr_sectors;
if (blockend < block) {
- printk("bad blockend %lu vs %lu\n", blockend, block);
+ if (!rq->quiet)
+ printk("bad blockend %lu vs %lu\n", (unsigned long) blockend, (unsigned long) block);
goto kill_rq;
}
if (blockend > port->capacity) {
- printk("offset for %u was %lu\n", minor, port->part[minor & PARTN_MASK].start_sect);
- printk("bad access: block %lu, count= %lu\n", blockend, (unsigned long) port->capacity);
+ if (!rq->quiet)
+ {
+ printk("offset for %u was %lu\n", minor, port->part[minor & PARTN_MASK].start_sect);
+ printk("bad access: block %lu, count= %lu\n", (unsigned long) blockend, (unsigned long) port->capacity);
+ }
goto kill_rq;
}
@@ -553,103 +559,17 @@ static void identify_timeout(unsigned long data)
static struct timer_list identify_timer = { .function = identify_timeout };
-/* Probe one AHCI port */
-static void ahci_probe_port(const volatile struct ahci_host *ahci_host, const volatile struct ahci_port *ahci_port)
+static int ahci_identify(const volatile struct ahci_host *ahci_host, const volatile struct ahci_port *ahci_port, struct port *port, unsigned cmd)
{
- struct port *port;
- void *mem;
struct hd_driveid id;
- unsigned cls = ((readl(&ahci_host->cap) >> 8) & 0x1f) + 1;
- struct ahci_command *command;
- struct ahci_fis *fis;
- struct ahci_cmd_tbl *prdtl;
struct ahci_fis_h2d *fis_h2d;
- vm_size_t size =
- cls * sizeof(*command)
- + sizeof(*fis)
- + cls * sizeof(*prdtl);
- unsigned i;
+ struct ahci_command *command = port->command;
+ struct ahci_cmd_tbl *prdtl = port->prdtl;
+ unsigned long flags;
unsigned slot;
unsigned long first_part;
unsigned long long timeout;
- unsigned long flags;
-
- for (i = 0; i < MAX_PORTS; i++) {
- if (!ports[i].ahci_port)
- break;
- }
- if (i == MAX_PORTS)
- return;
- port = &ports[i];
-
- /* Has to be 1K-aligned */
- mem = vmalloc (size);
- if (!mem)
- return;
- assert (!(((unsigned long) mem) & (1024-1)));
- memset (mem, 0, size);
-
- port->ahci_host = ahci_host;
- port->ahci_port = ahci_port;
- port->cls = cls;
-
- port->command = command = mem;
- port->fis = fis = (void*) command + cls * sizeof(*command);
- port->prdtl = prdtl = (void*) fis + sizeof(*fis);
-
- /* Stop commands */
- writel(readl(&ahci_port->cmd) & ~PORT_CMD_START, &ahci_port->cmd);
- timeout = jiffies + WAIT_MAX;
- while (readl(&ahci_port->cmd) & PORT_CMD_LIST_ON)
- if (jiffies > timeout) {
- printk("sd%u: timeout waiting for list completion\n", port-ports);
- port->ahci_host = NULL;
- port->ahci_port = NULL;
- return;
- }
-
- writel(readl(&ahci_port->cmd) & ~PORT_CMD_FIS_RX, &ahci_port->cmd);
- timeout = jiffies + WAIT_MAX;
- while (readl(&ahci_port->cmd) & PORT_CMD_FIS_ON)
- if (jiffies > timeout) {
- printk("sd%u: timeout waiting for FIS completion\n", port-ports);
- port->ahci_host = NULL;
- port->ahci_port = NULL;
- return;
- }
-
- /* We don't support 64bit */
- /* Point controller to our buffers */
- writel(0, &ahci_port->clbu);
- writel(vmtophys((void*) command), &ahci_port->clb);
- writel(0, &ahci_port->fbu);
- writel(vmtophys((void*) fis), &ahci_port->fb);
-
- /* Clear any previous interrupts */
- writel(readl(&ahci_port->is), &ahci_port->is);
- writel(1 << (ahci_port - ahci_host->ports), &ahci_host->is);
-
- /* And activate them */
- writel(DEF_PORT_IRQ, &ahci_port->ie);
- writel(readl(&ahci_host->ghc) | HOST_IRQ_EN, &ahci_host->ghc);
-
- for (i = 0; i < cls; i++)
- {
- command[i].ctbau = 0;
- command[i].ctba = vmtophys((void*) &prdtl[i]);
- }
-
- /* Start commands */
- timeout = jiffies + WAIT_MAX;
- while (readl(&ahci_port->cmd) & PORT_CMD_LIST_ON)
- if (jiffies > timeout) {
- printk("sd%u: timeout waiting for list completion\n", port-ports);
- port->ahci_host = NULL;
- port->ahci_port = NULL;
- return;
- }
-
- writel(readl(&ahci_port->cmd) | PORT_CMD_FIS_RX | PORT_CMD_START, &ahci_port->cmd);
+ int ret = 0;
/* Identify device */
/* TODO: make this a request */
@@ -658,7 +578,7 @@ static void ahci_probe_port(const volatile struct ahci_host *ahci_host, const vo
fis_h2d = (void*) &prdtl[slot].cfis;
fis_h2d->fis_type = FIS_TYPE_REG_H2D;
fis_h2d->flags = 128;
- fis_h2d->command = WIN_IDENTIFY;
+ fis_h2d->command = cmd;
fis_h2d->device = 0;
/* Fetch the 512 identify data */
@@ -695,7 +615,7 @@ static void ahci_probe_port(const volatile struct ahci_host *ahci_host, const vo
printk("sd%u: timeout waiting for ready\n", port-ports);
port->ahci_host = NULL;
port->ahci_port = NULL;
- return;
+ return 3;
}
save_flags(flags);
@@ -718,22 +638,49 @@ static void ahci_probe_port(const volatile struct ahci_host *ahci_host, const vo
port->ahci_host = NULL;
port->ahci_port = NULL;
del_timer(&identify_timer);
- return;
+ restore_flags(flags);
+ return 3;
}
sleep_on(&port->q);
}
del_timer(&identify_timer);
restore_flags(flags);
- if (readl(&ahci_port->is) & PORT_IRQ_TF_ERR)
+ if ((port->status & PORT_IRQ_TF_ERR) || readl(&ahci_port->is) & PORT_IRQ_TF_ERR)
{
- printk("sd%u: identify error\n", port-ports);
+ /* Identify error */
port->capacity = 0;
port->lba48 = 0;
+ ret = 2;
} else {
+ memcpy(&port->id, &id, sizeof(id));
+ port->is_cd = 0;
+
ide_fixstring(id.model, sizeof(id.model), 1);
ide_fixstring(id.fw_rev, sizeof(id.fw_rev), 1);
ide_fixstring(id.serial_no, sizeof(id.serial_no), 1);
+ if (cmd == WIN_PIDENTIFY)
+ {
+ unsigned char type = (id.config >> 8) & 0x1f;
+
+ printk("sd%u: %s, ATAPI ", port - ports, id.model);
+ if (type == 5)
+ {
+ printk("unsupported CDROM drive\n");
+ port->is_cd = 1;
+ port->lba48 = 0;
+ port->capacity = 0;
+ }
+ else
+ {
+ printk("unsupported type %d\n", type);
+ port->lba48 = 0;
+ port->capacity = 0;
+ return 2;
+ }
+ return 0;
+ }
+
if (id.command_set_2 & (1U<<10))
{
port->lba48 = 1;
@@ -760,6 +707,106 @@ static void ahci_probe_port(const volatile struct ahci_host *ahci_host, const vo
printk("sd%u: %s, %uMB w/%dkB Cache\n", port - ports, id.model, (unsigned) (port->capacity/2048), id.buf_size/2);
}
port->identify = 0;
+
+ return ret;
+}
+
+/* Probe one AHCI port */
+static void ahci_probe_port(const volatile struct ahci_host *ahci_host, const volatile struct ahci_port *ahci_port)
+{
+ struct port *port;
+ void *mem;
+ unsigned cls = ((readl(&ahci_host->cap) >> 8) & 0x1f) + 1;
+ struct ahci_command *command;
+ struct ahci_fis *fis;
+ struct ahci_cmd_tbl *prdtl;
+ vm_size_t size =
+ cls * sizeof(*command)
+ + sizeof(*fis)
+ + cls * sizeof(*prdtl);
+ unsigned i;
+ unsigned long long timeout;
+
+ for (i = 0; i < MAX_PORTS; i++) {
+ if (!ports[i].ahci_port)
+ break;
+ }
+ if (i == MAX_PORTS)
+ return;
+ port = &ports[i];
+
+ /* Has to be 1K-aligned */
+ mem = vmalloc (size);
+ if (!mem)
+ return;
+ assert (!(((unsigned long) mem) & (1024-1)));
+ memset (mem, 0, size);
+
+ port->ahci_host = ahci_host;
+ port->ahci_port = ahci_port;
+ port->cls = cls;
+
+ port->command = command = mem;
+ port->fis = fis = (void*) command + cls * sizeof(*command);
+ port->prdtl = prdtl = (void*) fis + sizeof(*fis);
+
+ /* Stop commands */
+ writel(readl(&ahci_port->cmd) & ~PORT_CMD_START, &ahci_port->cmd);
+ timeout = jiffies + WAIT_MAX;
+ while (readl(&ahci_port->cmd) & PORT_CMD_LIST_ON)
+ if (jiffies > timeout) {
+ printk("sd%u: timeout waiting for list completion\n", port-ports);
+ port->ahci_host = NULL;
+ port->ahci_port = NULL;
+ return;
+ }
+
+ writel(readl(&ahci_port->cmd) & ~PORT_CMD_FIS_RX, &ahci_port->cmd);
+ timeout = jiffies + WAIT_MAX;
+ while (readl(&ahci_port->cmd) & PORT_CMD_FIS_ON)
+ if (jiffies > timeout) {
+ printk("sd%u: timeout waiting for FIS completion\n", port-ports);
+ port->ahci_host = NULL;
+ port->ahci_port = NULL;
+ return;
+ }
+
+ /* We don't support 64bit */
+ /* Point controller to our buffers */
+ writel(0, &ahci_port->clbu);
+ writel(vmtophys((void*) command), &ahci_port->clb);
+ writel(0, &ahci_port->fbu);
+ writel(vmtophys((void*) fis), &ahci_port->fb);
+
+ /* Clear any previous interrupts */
+ writel(readl(&ahci_port->is), &ahci_port->is);
+ writel(1 << (ahci_port - ahci_host->ports), &ahci_host->is);
+
+ /* And activate them */
+ writel(DEF_PORT_IRQ, &ahci_port->ie);
+ writel(readl(&ahci_host->ghc) | HOST_IRQ_EN, &ahci_host->ghc);
+
+ for (i = 0; i < cls; i++)
+ {
+ command[i].ctbau = 0;
+ command[i].ctba = vmtophys((void*) &prdtl[i]);
+ }
+
+ /* Start commands */
+ timeout = jiffies + WAIT_MAX;
+ while (readl(&ahci_port->cmd) & PORT_CMD_LIST_ON)
+ if (jiffies > timeout) {
+ printk("sd%u: timeout waiting for list completion\n", port-ports);
+ port->ahci_host = NULL;
+ port->ahci_port = NULL;
+ return;
+ }
+
+ writel(readl(&ahci_port->cmd) | PORT_CMD_FIS_RX | PORT_CMD_START, &ahci_port->cmd);
+
+ if (ahci_identify(ahci_host, ahci_port, port, WIN_IDENTIFY) >= 2)
+ /* Try ATAPI */
+ ahci_identify(ahci_host, ahci_port, port, WIN_PIDENTIFY);
}
/* Probe one AHCI PCI device */
@@ -779,41 +826,56 @@ static void ahci_probe_dev(unsigned char bus, unsigned char device)
/* Get configuration */
if (pcibios_read_config_byte(bus, device, PCI_HEADER_TYPE, &hdrtype) != PCIBIOS_SUCCESSFUL) {
- printk("ahci: %02u:%02u.%u: Can not read configuration", bus, dev, fun);
+ printk("ahci: %02x:%02x.%x: Can not read configuration", bus, dev, fun);
return;
}
if (hdrtype != 0) {
- printk("ahci: %02u:%02u.%u: Unknown hdrtype %d\n", bus, dev, fun, hdrtype);
+ printk("ahci: %02x:%02x.%x: Unknown hdrtype %d\n", bus, dev, fun, hdrtype);
return;
}
if (pcibios_read_config_dword(bus, device, PCI_BASE_ADDRESS_5, &bar) != PCIBIOS_SUCCESSFUL) {
- printk("ahci: %02u:%02u.%u: Can not read BAR 5", bus, dev, fun);
+ printk("ahci: %02x:%02x.%x: Can not read BAR 5", bus, dev, fun);
return;
}
- if (bar & 0x01) {
- printk("ahci: %02u:%02u.%u: BAR 5 is I/O?!", bus, dev, fun);
+ if (bar & PCI_BASE_ADDRESS_SPACE_IO) {
+ printk("ahci: %02x:%02x.%x: BAR 5 is I/O?!", bus, dev, fun);
return;
}
- bar &= ~0x0f;
+ bar &= PCI_BASE_ADDRESS_MEM_MASK;
if (pcibios_read_config_byte(bus, device, PCI_INTERRUPT_LINE, &irq) != PCIBIOS_SUCCESSFUL) {
- printk("ahci: %02u:%02u.%u: Can not read IRQ", bus, dev, fun);
+ printk("ahci: %02x:%02x.%x: Can not read IRQ", bus, dev, fun);
return;
}
- printk("AHCI SATA %02u:%02u.%u BAR 0x%x IRQ %u\n", bus, dev, fun, bar, irq);
+ printk("AHCI SATA %02x:%02x.%x BAR 0x%x IRQ %u\n", bus, dev, fun, bar, irq);
/* Map mmio */
ahci_host = vremap(bar, 0x2000);
/* Request IRQ */
if (request_irq(irq, &ahci_interrupt, SA_SHIRQ, "ahci", (void*) ahci_host)) {
- printk("ahci: %02u:%02u.%u: Can not get irq %u\n", bus, dev, fun, irq);
+ printk("ahci: %02x:%02x.%x: Can not get irq %u\n", bus, dev, fun, irq);
return;
}
+#ifdef CONFIG_BLK_DEV_IDE
+ /* OK, we will handle it. Disable probing on legacy IDE ports it may have. */
+ for (i = 0; i < 6; i++)
+ {
+ unsigned mybar;
+ if (pcibios_read_config_dword(bus, device, PCI_BASE_ADDRESS_0 + i*4, &mybar) == PCIBIOS_SUCCESSFUL) {
+ if (!(bar & PCI_BASE_ADDRESS_SPACE_IO))
+ /* Memory, don't care */
+ continue;
+ /* printk("ahci: %02x:%02x.%x: BAR %d is %x\n", bus, dev, fun, i, mybar); */
+ ide_disable_base(bar & PCI_BASE_ADDRESS_IO_MASK);
+ }
+ }
+#endif
+
nports = (readl(&ahci_host->cap) & 0x1f) + 1;
port_map = readl(&ahci_host->pi);
@@ -822,7 +884,7 @@ static void ahci_probe_dev(unsigned char bus, unsigned char device)
n++;
if (nports != n) {
- printk("ahci: %02u:%02u.%u: Odd number of ports, assuming %d is correct\n", bus, dev, fun, nports);
+ printk("ahci: %02x:%02x.%x: Odd number of ports %u, assuming %u is correct\n", bus, dev, fun, n, nports);
port_map = 0;
}
if (!port_map) {
@@ -831,6 +893,7 @@ static void ahci_probe_dev(unsigned char bus, unsigned char device)
for (i = 0; i < AHCI_MAX_PORTS; i++) {
u32 ssts;
+ u8 spd, ipm;
if (!(port_map & (1U << i)))
continue;
@@ -838,12 +901,45 @@ static void ahci_probe_dev(unsigned char bus, unsigned char device)
ahci_port = &ahci_host->ports[i];
ssts = readl(&ahci_port->ssts);
- if ((ssts & 0xf) != 0x3)
- /* Device not present */
- continue;
- if (((ssts >> 8) & 0xf) != 0x1)
- /* Device down */
- continue;
+ spd = ssts & 0xf;
+ switch (spd)
+ {
+ case 0x0:
+ /* Device not present */
+ continue;
+ case 0x1:
+ printk("ahci: %02x:%02x.%x: Port %u communication not established. TODO: power on device\n", bus, dev, fun, i);
+ continue;
+ case 0x3:
+ /* Present and communication established */
+ break;
+ case 0x4:
+ printk("ahci: %02x:%02x.%x: Port %u phy offline?!\n", bus, dev, fun, i);
+ continue;
+ default:
+ printk("ahci: %02x:%02x.%x: Unknown port %u SPD %x\n", bus, dev, fun, i, spd);
+ continue;
+ }
+
+ ipm = (ssts >> 8) & 0xf;
+ switch (ipm)
+ {
+ case 0x0:
+ /* Device not present */
+ continue;
+ case 0x1:
+ /* Active */
+ break;
+ case 0x2:
+ printk("ahci: %02x:%02x.%x: Port %u in Partial power management. TODO: power on device\n", bus, dev, fun, i);
+ continue;
+ case 0x6:
+ printk("ahci: %02x:%02x.%x: Port %u in Slumber power management. TODO: power on device\n", bus, dev, fun, i);
+ continue;
+ default:
+ printk("ahci: %02x:%02x.%x: Unknown port %u IPM %x\n", bus, dev, fun, i, ipm);
+ continue;
+ }
/* OK! Probe this port */
ahci_probe_port(ahci_host, ahci_port);
@@ -859,6 +955,8 @@ static void ahci_geninit(struct gendisk *gd)
for (unit = 0; unit < gd->nr_real; unit++) {
port = &ports[unit];
port->part[0].nr_sects = port->capacity;
+ if (!port->part[0].nr_sects)
+ port->part[0].nr_sects = -1;
}
}
diff --git a/linux/dev/drivers/block/floppy.c b/linux/dev/drivers/block/floppy.c
index 4c0977a3..83d66f05 100644
--- a/linux/dev/drivers/block/floppy.c
+++ b/linux/dev/drivers/block/floppy.c
@@ -3723,7 +3723,7 @@ static int floppy_revalidate(kdev_t dev)
return 1;
}
if (bh && !buffer_uptodate(bh))
- ll_rw_block(READ, 1, &bh);
+ ll_rw_block(READ, 1, &bh, 1);
process_fd_request();
wait_on_buffer(bh);
brelse(bh);
diff --git a/linux/dev/drivers/block/genhd.c b/linux/dev/drivers/block/genhd.c
index 95b499b1..3a861386 100644
--- a/linux/dev/drivers/block/genhd.c
+++ b/linux/dev/drivers/block/genhd.c
@@ -27,6 +27,8 @@
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/blk.h>
#endif
+#include <linux/hdreg.h>
+#include <alloca.h>
#include <asm/system.h>
@@ -768,12 +770,36 @@ static void setup_dev(struct gendisk *dev)
void device_setup(void)
{
extern void console_map_init(void);
+ extern char *kernel_cmdline;
+ char *c, *param, *white;
struct gendisk *p;
int nr=0;
#ifdef MACH
linux_intr_pri = SPL6;
#endif
+ for (c = kernel_cmdline; c; )
+ {
+ param = strstr(c, " ide");
+ if (!param)
+ param = strstr(c, " hd");
+ if (!param)
+ break;
+ if (param) {
+ param++;
+ white = strchr(param, ' ');
+ if (!white) {
+ ide_setup(param);
+ c = NULL;
+ } else {
+ char *word = alloca(white - param + 1);
+ strncpy(word, param, white - param);
+ word[white-param] = '\0';
+ ide_setup(word);
+ c = white + 1;
+ }
+ }
+ }
#ifndef MACH
chr_dev_init();
#endif
diff --git a/linux/dev/glue/block.c b/linux/dev/glue/block.c
index 011b6f56..74126ebe 100644
--- a/linux/dev/glue/block.c
+++ b/linux/dev/glue/block.c
@@ -384,7 +384,7 @@ bread (kdev_t dev, int block, int size)
bh = getblk (dev, block, size);
if (bh)
{
- ll_rw_block (READ, 1, &bh);
+ ll_rw_block (READ, 1, &bh, 0);
wait_on_buffer (bh);
if (! buffer_uptodate (bh))
{
@@ -444,7 +444,7 @@ enqueue_request (struct request *req)
/* Perform the I/O operation RW on the buffer list BH
containing NR buffers. */
void
-ll_rw_block (int rw, int nr, struct buffer_head **bh)
+ll_rw_block (int rw, int nr, struct buffer_head **bh, int quiet)
{
int i, bshift, bsize;
unsigned major;
@@ -476,6 +476,7 @@ ll_rw_block (int rw, int nr, struct buffer_head **bh)
r->rq_dev = bh[0]->b_dev;
r->cmd = rw;
r->errors = 0;
+ r->quiet = quiet;
r->sector = bh[0]->b_blocknr << (bshift - 9);
r->current_nr_sectors = bh[0]->b_size >> 9;
r->buffer = bh[0]->b_data;
@@ -528,7 +529,7 @@ rdwr_partial (int rw, kdev_t dev, loff_t *off,
bh->b_data = alloc_buffer (bh->b_size);
if (! bh->b_data)
return -ENOMEM;
- ll_rw_block (READ, 1, &bh);
+ ll_rw_block (READ, 1, &bh, 0);
wait_on_buffer (bh);
if (buffer_uptodate (bh))
{
@@ -542,7 +543,7 @@ rdwr_partial (int rw, kdev_t dev, loff_t *off,
{
memcpy (bh->b_data + o, *buf, c);
bh->b_state = (1 << BH_Dirty) | (1 << BH_Lock);
- ll_rw_block (WRITE, 1, &bh);
+ ll_rw_block (WRITE, 1, &bh, 0);
wait_on_buffer (bh);
if (! buffer_uptodate (bh))
{
@@ -623,7 +624,8 @@ rdwr_full (int rw, kdev_t dev, loff_t *off, char **buf, int *resid, int bshift)
}
if (! err)
{
- ll_rw_block (rw, i, bhp);
+ assert (i > 0);
+ ll_rw_block (rw, i, bhp, 0);
wait_on_buffer (bhp[i - 1]);
}
for (bh = bhead, cc = 0, j = 0; j < i; cc += bh->b_size, bh++, j++)
@@ -1155,7 +1157,7 @@ out:
{
ipc_kobject_set (bd->port, IKO_NULL, IKOT_NONE);
ipc_port_dealloc_kernel (bd->port);
- *devp = IP_NULL;
+ *devp = (device_t) IP_NULL;
}
kfree ((vm_offset_t) bd, sizeof (struct block_data));
bd = NULL;
@@ -1659,41 +1661,6 @@ device_get_status (void *d, dev_flavor_t flavor, dev_status_t status,
*status_count = DEV_GET_RECORDS_COUNT;
break;
- case V_GETPARMS:
- if (*status_count < (sizeof (struct disk_parms) / sizeof (int)))
- return D_INVALID_OPERATION;
- else
- {
- struct disk_parms *dp = status;
- struct hd_geometry hg;
- DECL_DATA;
-
- INIT_DATA();
-
- if ((*bd->ds->fops->ioctl) (&td.inode, &td.file,
- HDIO_GETGEO, (unsigned long)&hg))
- return D_INVALID_OPERATION;
-
- dp->dp_type = DPT_WINI; /* XXX: It may be a floppy... */
- dp->dp_heads = hg.heads;
- dp->dp_cyls = hg.cylinders;
- dp->dp_sectors = hg.sectors;
- dp->dp_dosheads = hg.heads;
- dp->dp_doscyls = hg.cylinders;
- dp->dp_dossectors = hg.sectors;
- dp->dp_secsiz = 512; /* XXX */
- dp->dp_ptag = 0;
- dp->dp_pflag = 0;
-
- /* XXX */
- dp->dp_pstartsec = -1;
- dp->dp_pnumsec = -1;
-
- *status_count = sizeof (struct disk_parms) / sizeof (int);
- }
-
- break;
-
default:
return D_INVALID_OPERATION;
}
@@ -1703,7 +1670,7 @@ device_get_status (void *d, dev_flavor_t flavor, dev_status_t status,
static io_return_t
device_set_status (void *d, dev_flavor_t flavor, dev_status_t status,
- mach_msg_type_number_t *status_count)
+ mach_msg_type_number_t status_count)
{
struct block_data *bd = d;
diff --git a/linux/dev/glue/glue.h b/linux/dev/glue/glue.h
index 5d4f6d88..8cb118cc 100644
--- a/linux/dev/glue/glue.h
+++ b/linux/dev/glue/glue.h
@@ -25,8 +25,8 @@
extern int linux_auto_config;
extern int linux_intr_pri;
-extern void *alloc_contig_mem (unsigned, unsigned, unsigned, vm_page_t *);
-extern void free_contig_mem (vm_page_t);
+extern unsigned long alloc_contig_mem (unsigned, unsigned, unsigned, vm_page_t *);
+extern void free_contig_mem (vm_page_t, unsigned);
extern void init_IRQ (void);
extern void restore_IRQ (void);
extern void linux_kmem_init (void);
diff --git a/linux/dev/glue/kmem.c b/linux/dev/glue/kmem.c
index ff052ffc..ed576105 100644
--- a/linux/dev/glue/kmem.c
+++ b/linux/dev/glue/kmem.c
@@ -111,10 +111,8 @@ linux_kmem_init ()
for (p = pages, j = 0; j < MEM_CHUNK_SIZE - PAGE_SIZE; j += PAGE_SIZE)
{
assert (p->phys_addr < MEM_DMA_LIMIT);
- assert (p->phys_addr + PAGE_SIZE
- == ((vm_page_t) p->pageq.next)->phys_addr);
-
- p = (vm_page_t) p->pageq.next;
+ assert (p->phys_addr + PAGE_SIZE == (p + 1)->phys_addr);
+ p++;
}
pages_free[i].end = pages_free[i].start + MEM_CHUNK_SIZE;
diff --git a/linux/dev/include/asm-i386/string.h b/linux/dev/include/asm-i386/string.h
index bdb75455..f41ca5c0 100644
--- a/linux/dev/include/asm-i386/string.h
+++ b/linux/dev/include/asm-i386/string.h
@@ -28,7 +28,7 @@
*/
#define __HAVE_ARCH_STRCPY
-extern inline char * strcpy(char * dest,const char *src)
+static inline char * strcpy(char * dest,const char *src)
{
int d0, d1, d2;
__asm__ __volatile__(
@@ -43,7 +43,7 @@ return dest;
}
#define __HAVE_ARCH_STRNCPY
-extern inline char * strncpy(char * dest,const char *src,size_t count)
+static inline char * strncpy(char * dest,const char *src,size_t count)
{
int d0, d1, d2, d3;
__asm__ __volatile__(
@@ -63,7 +63,7 @@ return dest;
}
#define __HAVE_ARCH_STRCAT
-extern inline char * strcat(char * dest,const char * src)
+static inline char * strcat(char * dest,const char * src)
{
int d0, d1, d2, d3;
__asm__ __volatile__(
@@ -81,7 +81,7 @@ return dest;
}
#define __HAVE_ARCH_STRNCAT
-extern inline char * strncat(char * dest,const char * src,size_t count)
+static inline char * strncat(char * dest,const char * src,size_t count)
{
int d0, d1, d2, d3;
__asm__ __volatile__(
@@ -105,7 +105,7 @@ return dest;
}
#define __HAVE_ARCH_STRCMP
-extern inline int strcmp(const char * cs,const char * ct)
+static inline int strcmp(const char * cs,const char * ct)
{
int d0, d1;
register int __res;
@@ -127,7 +127,7 @@ return __res;
}
#define __HAVE_ARCH_STRNCMP
-extern inline int strncmp(const char * cs,const char * ct,size_t count)
+static inline int strncmp(const char * cs,const char * ct,size_t count)
{
register int __res;
int d0, d1, d2;
@@ -151,7 +151,7 @@ return __res;
}
#define __HAVE_ARCH_STRCHR
-extern inline char * strchr(const char * s, int c)
+static inline char * strchr(const char * s, int c)
{
int d0;
register char * __res;
@@ -171,7 +171,7 @@ return __res;
}
#define __HAVE_ARCH_STRRCHR
-extern inline char * strrchr(const char * s, int c)
+static inline char * strrchr(const char * s, int c)
{
int d0, d1;
register char * __res;
@@ -189,7 +189,7 @@ return __res;
}
#define __HAVE_ARCH_STRLEN
-extern inline size_t strlen(const char * s)
+static inline size_t strlen(const char * s)
{
int d0;
register int __res;
@@ -203,7 +203,7 @@ __asm__ __volatile__(
return __res;
}
-extern inline void * __memcpy(void * to, const void * from, size_t n)
+static inline void * __memcpy(void * to, const void * from, size_t n)
{
int d0, d1, d2;
__asm__ __volatile__(
@@ -226,7 +226,7 @@ return (to);
* This looks horribly ugly, but the compiler can optimize it totally,
* as the count is constant.
*/
-extern inline void * __constant_memcpy(void * to, const void * from, size_t n)
+static inline void * __constant_memcpy(void * to, const void * from, size_t n)
{
switch (n) {
case 0:
@@ -299,7 +299,7 @@ __asm__ __volatile__( \
__memcpy((t),(f),(n)))
#define __HAVE_ARCH_MEMMOVE
-extern inline void * memmove(void * dest,const void * src, size_t n)
+static inline void * memmove(void * dest,const void * src, size_t n)
{
int d0, d1, d2;
if (dest<src)
@@ -327,7 +327,7 @@ return dest;
#define memcmp __builtin_memcmp
#define __HAVE_ARCH_MEMCHR
-extern inline void * memchr(const void * cs,int c,size_t count)
+static inline void * memchr(const void * cs,int c,size_t count)
{
int d0;
register void * __res;
@@ -344,7 +344,7 @@ __asm__ __volatile__(
return __res;
}
-extern inline void * __memset_generic(void * s, char c,size_t count)
+static inline void * __memset_generic(void * s, char c,size_t count)
{
int d0, d1;
__asm__ __volatile__(
@@ -365,7 +365,7 @@ return s;
* things 32 bits at a time even when we don't know the size of the
* area at compile-time..
*/
-extern inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
+static inline void * __constant_c_memset(void * s, unsigned long c, size_t count)
{
int d0, d1;
__asm__ __volatile__(
@@ -386,7 +386,7 @@ return (s);
/* Added by Gertjan van Wingerde to make minix and sysv module work */
#define __HAVE_ARCH_STRNLEN
-extern inline size_t strnlen(const char * s, size_t count)
+static inline size_t strnlen(const char * s, size_t count)
{
int d0;
register int __res;
@@ -410,7 +410,7 @@ return __res;
* This looks horribly ugly, but the compiler can optimize it totally,
* as we by now know that both pattern and count is constant..
*/
-extern inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
+static inline void * __constant_c_and_count_memset(void * s, unsigned long pattern, size_t count)
{
switch (count) {
case 0:
@@ -469,7 +469,7 @@ __asm__ __volatile__("cld\n\t" \
* find the first occurrence of byte 'c', or 1 past the area if none
*/
#define __HAVE_ARCH_MEMSCAN
-extern inline void * memscan(void * addr, int c, size_t size)
+static inline void * memscan(void * addr, int c, size_t size)
{
if (!size)
return addr;
diff --git a/linux/dev/include/linux/blk.h b/linux/dev/include/linux/blk.h
index 412b8641..b924a14f 100644
--- a/linux/dev/include/linux/blk.h
+++ b/linux/dev/include/linux/blk.h
@@ -78,6 +78,7 @@ extern int hd_init(void);
#endif
#ifdef CONFIG_BLK_DEV_IDE
extern int ide_init(void);
+extern void ide_disable_base(unsigned base);
#endif
#ifdef CONFIG_BLK_DEV_XD
extern int xd_init(void);
@@ -391,8 +392,9 @@ static void end_request(int uptodate) {
req->errors = 0;
if (!uptodate) {
- printk("end_request: I/O error, dev %s, sector %lu\n",
- kdevname(req->rq_dev), req->sector);
+ if (!req->quiet)
+ printk("end_request: I/O error, dev %s, sector %lu\n",
+ kdevname(req->rq_dev), req->sector);
#ifdef MACH
for (bh = req->bh; bh; )
{
diff --git a/linux/dev/include/linux/blkdev.h b/linux/dev/include/linux/blkdev.h
index e9a40d7e..5bf0a288 100644
--- a/linux/dev/include/linux/blkdev.h
+++ b/linux/dev/include/linux/blkdev.h
@@ -23,6 +23,7 @@ struct request {
kdev_t rq_dev;
int cmd; /* READ or WRITE */
int errors;
+ int quiet;
unsigned long sector;
unsigned long nr_sectors;
unsigned long current_nr_sectors;
diff --git a/linux/dev/include/linux/fs.h b/linux/dev/include/linux/fs.h
index 740ebb54..def2bc98 100644
--- a/linux/dev/include/linux/fs.h
+++ b/linux/dev/include/linux/fs.h
@@ -638,7 +638,7 @@ extern int nr_buffer_heads;
#define NR_LIST 4
#ifdef MACH
-extern inline void
+static inline void
mark_buffer_uptodate (struct buffer_head *bh, int on)
{
if (on)
@@ -650,7 +650,7 @@ mark_buffer_uptodate (struct buffer_head *bh, int on)
void mark_buffer_uptodate(struct buffer_head * bh, int on);
#endif
-extern inline void mark_buffer_clean(struct buffer_head * bh)
+static inline void mark_buffer_clean(struct buffer_head * bh)
{
#ifdef MACH
clear_bit (BH_Dirty, &bh->b_state);
@@ -662,7 +662,7 @@ extern inline void mark_buffer_clean(struct buffer_head * bh)
#endif
}
-extern inline void mark_buffer_dirty(struct buffer_head * bh, int flag)
+static inline void mark_buffer_dirty(struct buffer_head * bh, int flag)
{
#ifdef MACH
set_bit (BH_Dirty, &bh->b_state);
@@ -733,7 +733,7 @@ extern struct file * get_empty_filp(void);
extern int close_fp(struct file *filp);
extern struct buffer_head * get_hash_table(kdev_t dev, int block, int size);
extern struct buffer_head * getblk(kdev_t dev, int block, int size);
-extern void ll_rw_block(int rw, int nr, struct buffer_head * bh[]);
+extern void ll_rw_block(int rw, int nr, struct buffer_head * bh[], int quiet);
extern void ll_rw_page(int rw, kdev_t dev, unsigned long nr, char * buffer);
extern void ll_rw_swap_file(int rw, kdev_t dev, unsigned int *b, int nb, char *buffer);
extern int is_read_only(kdev_t dev);
diff --git a/linux/dev/include/linux/locks.h b/linux/dev/include/linux/locks.h
index 72cf108d..ae063fba 100644
--- a/linux/dev/include/linux/locks.h
+++ b/linux/dev/include/linux/locks.h
@@ -20,13 +20,13 @@ extern struct buffer_head *reuse_list;
*/
extern void __wait_on_buffer(struct buffer_head *);
-extern inline void wait_on_buffer(struct buffer_head * bh)
+static inline void wait_on_buffer(struct buffer_head * bh)
{
if (test_bit(BH_Lock, &bh->b_state))
__wait_on_buffer(bh);
}
-extern inline void lock_buffer(struct buffer_head * bh)
+static inline void lock_buffer(struct buffer_head * bh)
{
while (set_bit(BH_Lock, &bh->b_state))
__wait_on_buffer(bh);
@@ -42,20 +42,20 @@ void unlock_buffer(struct buffer_head *);
*/
extern void __wait_on_super(struct super_block *);
-extern inline void wait_on_super(struct super_block * sb)
+static inline void wait_on_super(struct super_block * sb)
{
if (sb->s_lock)
__wait_on_super(sb);
}
-extern inline void lock_super(struct super_block * sb)
+static inline void lock_super(struct super_block * sb)
{
if (sb->s_lock)
__wait_on_super(sb);
sb->s_lock = 1;
}
-extern inline void unlock_super(struct super_block * sb)
+static inline void unlock_super(struct super_block * sb)
{
sb->s_lock = 0;
wake_up(&sb->s_wait);
diff --git a/linux/dev/include/linux/mm.h b/linux/dev/include/linux/mm.h
index cd061378..b0c3ab08 100644
--- a/linux/dev/include/linux/mm.h
+++ b/linux/dev/include/linux/mm.h
@@ -234,7 +234,7 @@ extern mem_map_t * mem_map;
#define __get_dma_pages(priority, order) __get_free_pages((priority),(order),1)
extern unsigned long __get_free_pages(int priority, unsigned long gfporder, int dma);
-extern inline unsigned long get_free_page(int priority)
+static inline unsigned long get_free_page(int priority)
{
unsigned long page;
diff --git a/linux/dev/include/linux/types.h b/linux/dev/include/linux/types.h
index 57bb25f4..b697d9ec 100644
--- a/linux/dev/include/linux/types.h
+++ b/linux/dev/include/linux/types.h
@@ -109,6 +109,15 @@ struct ustat {
char f_fpack[6];
};
+/* stdint.h */
+typedef s8 int8_t;
+typedef u8 uint8_t;
+typedef s16 int16_t;
+typedef u16 uint16_t;
+typedef s32 int32_t;
+typedef u32 uint32_t;
+typedef s64 int64_t;
+typedef u64 uint64_t;
/* Yes, this is ugly. But that's why it is called glue code. */
diff --git a/linux/dev/init/main.c b/linux/dev/init/main.c
index ecbd0b68..d69b3fc7 100644
--- a/linux/dev/init/main.c
+++ b/linux/dev/init/main.c
@@ -98,7 +98,7 @@ void
linux_init (void)
{
int addr;
- unsigned memory_start, memory_end;
+ unsigned long memory_start, memory_end;
vm_page_t pages;
/*
@@ -131,9 +131,7 @@ linux_init (void)
/*
* Allocate contiguous memory below 16 MB.
*/
- memory_start = (unsigned long) alloc_contig_mem (CONTIG_ALLOC,
- 16 * 1024 * 1024,
- 0, &pages);
+ memory_start = alloc_contig_mem (CONTIG_ALLOC, 16 * 1024 * 1024, 0, &pages);
if (memory_start == 0)
panic ("linux_init: alloc_contig_mem failed");
memory_end = memory_start + CONTIG_ALLOC;
@@ -147,14 +145,6 @@ linux_init (void)
panic ("linux_init: ran out memory");
/*
- * Free unused memory.
- */
- while (pages && phystokv(pages->phys_addr) < round_page (memory_start))
- pages = (vm_page_t) pages->pageq.next;
- if (pages)
- free_contig_mem (pages);
-
- /*
* Initialize devices.
*/
#ifdef CONFIG_INET
@@ -182,140 +172,31 @@ linux_init (void)
/*
* Allocate contiguous memory with the given constraints.
- * This routine is horribly inefficient but it is presently
- * only used during initialization so it's not that bad.
*/
-void *
+unsigned long
alloc_contig_mem (unsigned size, unsigned limit,
unsigned mask, vm_page_t * pages)
{
- int i, j, bits_len;
- unsigned *bits, len;
- void *m;
- vm_page_t p, page_list, tail, prev;
- vm_offset_t addr, max_addr;
-
- if (size == 0)
- return (NULL);
- size = round_page (size);
- if ((size >> PAGE_SHIFT) > vm_page_free_count)
- return (NULL);
-
- /* Allocate bit array. */
- max_addr = phys_last_addr;
- if (max_addr > limit)
- max_addr = limit;
- bits_len = ((((max_addr >> PAGE_SHIFT) + NBPW - 1) / NBPW)
- * sizeof (unsigned));
- bits = (unsigned *) kalloc (bits_len);
- if (!bits)
- return (NULL);
- memset (bits, 0, bits_len);
+ vm_page_t p;
- /*
- * Walk the page free list and set a bit for every usable page.
- */
- simple_lock (&vm_page_queue_free_lock);
- p = vm_page_queue_free;
- while (p)
- {
- if (p->phys_addr < limit)
- (bits[(p->phys_addr >> PAGE_SHIFT) / NBPW]
- |= 1 << ((p->phys_addr >> PAGE_SHIFT) % NBPW));
- p = (vm_page_t) p->pageq.next;
- }
+ p = vm_page_grab_contig(size, VM_PAGE_SEL_DMA);
- /*
- * Scan bit array for contiguous pages.
- */
- len = 0;
- m = NULL;
- for (i = 0; len < size && i < bits_len / sizeof (unsigned); i++)
- for (j = 0; len < size && j < NBPW; j++)
- if (!(bits[i] & (1 << j)))
- {
- len = 0;
- m = NULL;
- }
- else
- {
- if (len == 0)
- {
- addr = ((vm_offset_t) (i * NBPW + j)
- << PAGE_SHIFT);
- if ((addr & mask) == 0)
- {
- len += PAGE_SIZE;
- m = (void *) addr;
- }
- }
- else
- len += PAGE_SIZE;
- }
-
- if (len != size)
- {
- simple_unlock (&vm_page_queue_free_lock);
- kfree ((vm_offset_t) bits, bits_len);
- return (NULL);
- }
-
- /*
- * Remove pages from free list
- * and construct list to return to caller.
- */
- page_list = NULL;
- for (len = 0; len < size; len += PAGE_SIZE, addr += PAGE_SIZE)
- {
- prev = NULL;
- for (p = vm_page_queue_free; p; p = (vm_page_t) p->pageq.next)
- {
- if (p->phys_addr == addr)
- break;
- prev = p;
- }
- if (!p)
- panic ("alloc_contig_mem: page not on free list");
- if (prev)
- prev->pageq.next = p->pageq.next;
- else
- vm_page_queue_free = (vm_page_t) p->pageq.next;
- p->free = FALSE;
- p->pageq.next = NULL;
- if (!page_list)
- page_list = tail = p;
- else
- {
- tail->pageq.next = (queue_entry_t) p;
- tail = p;
- }
- vm_page_free_count--;
- }
+ if (p == NULL)
+ return 0;
- simple_unlock (&vm_page_queue_free_lock);
- kfree ((vm_offset_t) bits, bits_len);
if (pages)
- *pages = page_list;
- return phystokv(m);
+ *pages = p;
+
+ return phystokv(vm_page_to_pa(p));
}
/*
* Free memory allocated by alloc_contig_mem.
*/
void
-free_contig_mem (vm_page_t pages)
+free_contig_mem (vm_page_t pages, unsigned size)
{
- int i;
- vm_page_t p;
-
- for (p = pages, i = 0; p->pageq.next; p = (vm_page_t) p->pageq.next, i++)
- p->free = TRUE;
- p->free = TRUE;
- simple_lock (&vm_page_queue_free_lock);
- vm_page_free_count += i + 1;
- p->pageq.next = (queue_entry_t) vm_page_queue_free;
- vm_page_queue_free = pages;
- simple_unlock (&vm_page_queue_free_lock);
+ vm_page_free_contig(pages, size);
}
/* This is the number of bits of precision for the loops_per_second. Each
diff --git a/linux/pcmcia-cs/clients/axnet_cs.c b/linux/pcmcia-cs/clients/axnet_cs.c
index bcd79b0e..2e7d9edc 100644
--- a/linux/pcmcia-cs/clients/axnet_cs.c
+++ b/linux/pcmcia-cs/clients/axnet_cs.c
@@ -1814,7 +1814,7 @@ static void set_multicast_list(struct net_device *dev)
static int axdev_init(struct net_device *dev)
{
if (ei_debug > 1)
- printk(version_8390);
+ printk("%s", version_8390);
if (dev->priv == NULL)
{
diff --git a/linux/pcmcia-cs/glue/ds.c b/linux/pcmcia-cs/glue/ds.c
index 8f88b553..cc4b92b5 100644
--- a/linux/pcmcia-cs/glue/ds.c
+++ b/linux/pcmcia-cs/glue/ds.c
@@ -24,12 +24,6 @@
/* This file is included from linux/pcmcia-cs/modules/ds.c. */
/*
- * Prepare the namespace for inclusion of Mach header files.
- */
-
-#undef PAGE_SHIFT
-
-/*
* This is really ugly. But this is glue code, so... It's about the `kfree'
* symbols in <linux/malloc.h> and <kern/kalloc.h>.
*/
diff --git a/linux/src/arch/i386/kernel/bios32.c b/linux/src/arch/i386/kernel/bios32.c
index b069ce46..c10cc0c0 100644
--- a/linux/src/arch/i386/kernel/bios32.c
+++ b/linux/src/arch/i386/kernel/bios32.c
@@ -909,6 +909,8 @@ unsigned long pcibios_init(unsigned long memory_start, unsigned long memory_end)
}
if (bios32_entry && check_pcibios())
access_pci = &pci_bios_access;
+ else
+ access_pci = check_direct_pci();
#endif
return memory_start;
}
diff --git a/linux/src/drivers/block/ide-cd.c b/linux/src/drivers/block/ide-cd.c
index e4548f54..020a8313 100644
--- a/linux/src/drivers/block/ide-cd.c
+++ b/linux/src/drivers/block/ide-cd.c
@@ -649,7 +649,7 @@ static void cdrom_end_request (int uptodate, ide_drive_t *drive)
{
struct request *rq = HWGROUP(drive)->rq;
- if (rq->cmd == REQUEST_SENSE_COMMAND && uptodate) {
+ if (rq->cmd == REQUEST_SENSE_COMMAND && uptodate && !rq->quiet) {
struct packet_command *pc = (struct packet_command *)
rq->buffer;
cdrom_analyze_sense_data (drive,
@@ -727,16 +727,18 @@ static int cdrom_decode_status (ide_drive_t *drive, int good_stat,
because workman constantly polls the drive
with this command, and we don't want
to uselessly fill up the syslog. */
- if (pc->c[0] != SCMD_READ_SUBCHANNEL)
+ if (pc->c[0] != SCMD_READ_SUBCHANNEL && !rq->quiet)
printk ("%s : tray open or drive not ready\n",
drive->name);
} else if (sense_key == UNIT_ATTENTION) {
/* Check for media change. */
cdrom_saw_media_change (drive);
- printk ("%s: media changed\n", drive->name);
+ if (!rq->quiet)
+ printk ("%s: media changed\n", drive->name);
} else {
/* Otherwise, print an error. */
- ide_dump_status (drive, "packet command error",
+ if (!rq->quiet)
+ ide_dump_status (drive, "packet command error",
stat);
}
@@ -768,7 +770,8 @@ static int cdrom_decode_status (ide_drive_t *drive, int good_stat,
cdrom_saw_media_change (drive);
/* Fail the request. */
- printk ("%s : tray open\n", drive->name);
+ if (!rq->quiet)
+ printk ("%s : tray open\n", drive->name);
cdrom_end_request (0, drive);
} else if (sense_key == UNIT_ATTENTION) {
/* Media change. */
@@ -783,7 +786,8 @@ static int cdrom_decode_status (ide_drive_t *drive, int good_stat,
sense_key == DATA_PROTECT) {
/* No point in retrying after an illegal
request or data protect error.*/
- ide_dump_status (drive, "command error", stat);
+ if (!rq->quiet)
+ ide_dump_status (drive, "command error", stat);
cdrom_end_request (0, drive);
} else if ((err & ~ABRT_ERR) != 0) {
/* Go to the default handler
@@ -1406,7 +1410,7 @@ void cdrom_sleep (int time)
#endif
static
-int cdrom_queue_packet_command (ide_drive_t *drive, struct packet_command *pc)
+int cdrom_queue_packet_command (ide_drive_t *drive, struct packet_command *pc, int quiet)
{
struct atapi_request_sense my_reqbuf;
int retries = 10;
@@ -1423,6 +1427,7 @@ int cdrom_queue_packet_command (ide_drive_t *drive, struct packet_command *pc)
ide_init_drive_cmd (&req);
req.cmd = PACKET_COMMAND;
req.buffer = (char *)pc;
+ req.quiet = quiet;
(void) ide_do_drive_cmd (drive, &req, ide_wait);
if (pc->stat != 0) {
@@ -1563,7 +1568,7 @@ cdrom_check_status (ide_drive_t *drive,
pc.c[7] = CDROM_STATE_FLAGS (drive)->sanyo_slot % 3;
- return cdrom_queue_packet_command (drive, &pc);
+ return cdrom_queue_packet_command (drive, &pc, 1);
}
@@ -1588,7 +1593,7 @@ cdrom_lockdoor (ide_drive_t *drive, int lockflag,
pc.c[0] = ALLOW_MEDIUM_REMOVAL;
pc.c[4] = (lockflag != 0);
- stat = cdrom_queue_packet_command (drive, &pc);
+ stat = cdrom_queue_packet_command (drive, &pc, 0);
}
if (stat == 0)
@@ -1622,7 +1627,7 @@ cdrom_eject (ide_drive_t *drive, int ejectflag,
pc.c[0] = START_STOP;
pc.c[4] = 2 + (ejectflag != 0);
- return cdrom_queue_packet_command (drive, &pc);
+ return cdrom_queue_packet_command (drive, &pc, 0);
}
@@ -1637,7 +1642,7 @@ cdrom_pause (ide_drive_t *drive, int pauseflag,
pc.c[0] = SCMD_PAUSE_RESUME;
pc.c[8] = !pauseflag;
- return cdrom_queue_packet_command (drive, &pc);
+ return cdrom_queue_packet_command (drive, &pc, 0);
}
@@ -1653,7 +1658,7 @@ cdrom_startstop (ide_drive_t *drive, int startflag,
pc.c[0] = START_STOP;
pc.c[1] = 1;
pc.c[4] = startflag;
- return cdrom_queue_packet_command (drive, &pc);
+ return cdrom_queue_packet_command (drive, &pc, 0);
}
@@ -1676,7 +1681,7 @@ cdrom_read_capacity (ide_drive_t *drive, unsigned *capacity,
pc.buffer = (unsigned char *)&capbuf;
pc.buflen = sizeof (capbuf);
- stat = cdrom_queue_packet_command (drive, &pc);
+ stat = cdrom_queue_packet_command (drive, &pc, 1);
if (stat == 0)
*capacity = ntohl (capbuf.lba);
@@ -1702,7 +1707,7 @@ cdrom_read_tocentry (ide_drive_t *drive, int trackno, int msf_flag,
pc.c[8] = (buflen & 0xff);
pc.c[9] = (format << 6);
if (msf_flag) pc.c[1] = 2;
- return cdrom_queue_packet_command (drive, &pc);
+ return cdrom_queue_packet_command (drive, &pc, 1);
}
@@ -1834,7 +1839,7 @@ cdrom_read_subchannel (ide_drive_t *drive, int format,
pc.c[3] = format,
pc.c[7] = (buflen >> 8);
pc.c[8] = (buflen & 0xff);
- return cdrom_queue_packet_command (drive, &pc);
+ return cdrom_queue_packet_command (drive, &pc, 0);
}
@@ -1855,7 +1860,7 @@ cdrom_mode_sense (ide_drive_t *drive, int pageno, int modeflag,
pc.c[2] = pageno | (modeflag << 6);
pc.c[7] = (buflen >> 8);
pc.c[8] = (buflen & 0xff);
- return cdrom_queue_packet_command (drive, &pc);
+ return cdrom_queue_packet_command (drive, &pc, 0);
}
@@ -1875,7 +1880,7 @@ cdrom_mode_select (ide_drive_t *drive, int pageno, char *buf, int buflen,
pc.c[2] = pageno;
pc.c[7] = (buflen >> 8);
pc.c[8] = (buflen & 0xff);
- return cdrom_queue_packet_command (drive, &pc);
+ return cdrom_queue_packet_command (drive, &pc, 0);
}
@@ -1903,7 +1908,7 @@ cdrom_play_lba_range_1 (ide_drive_t *drive, int lba_start, int lba_end,
}
#endif /* not STANDARD_ATAPI */
- return cdrom_queue_packet_command (drive, &pc);
+ return cdrom_queue_packet_command (drive, &pc, 0);
}
@@ -2004,7 +2009,7 @@ cdrom_read_block (ide_drive_t *drive, int format, int lba, int nblocks,
else
pc.c[9] = 0x10;
- stat = cdrom_queue_packet_command (drive, &pc);
+ stat = cdrom_queue_packet_command (drive, &pc, 0);
#if ! STANDARD_ATAPI
/* If the drive doesn't recognize the READ CD opcode, retry the command
@@ -2059,7 +2064,7 @@ cdrom_load_unload (ide_drive_t *drive, int slot,
pc.c[0] = LOAD_UNLOAD;
pc.c[4] = 2 + (slot >= 0);
pc.c[8] = slot;
- return cdrom_queue_packet_command (drive, &pc);
+ return cdrom_queue_packet_command (drive, &pc, 0);
}
}
@@ -2575,7 +2580,7 @@ int ide_cdrom_ioctl (ide_drive_t *drive, struct inode *inode,
pc.buffer = buf;
}
- stat = cdrom_queue_packet_command (drive, &pc);
+ stat = cdrom_queue_packet_command (drive, &pc, 0);
if (len > 0)
memcpy_tofs ((void *)arg, buf, len);
@@ -2638,6 +2643,10 @@ int ide_cdrom_open (struct inode *ip, struct file *fp, ide_drive_t *drive)
if (stat == 0 || my_reqbuf.sense_key == UNIT_ATTENTION) {
(void) cdrom_lockdoor (drive, 1, &my_reqbuf);
(void) cdrom_read_toc (drive, &my_reqbuf);
+ } else {
+ /* Otherwise return as missing */
+ --drive->usage;
+ return -ENXIO;
}
}
diff --git a/linux/src/drivers/block/ide.c b/linux/src/drivers/block/ide.c
index 0f3fd01f..dc20fcba 100644
--- a/linux/src/drivers/block/ide.c
+++ b/linux/src/drivers/block/ide.c
@@ -325,7 +325,7 @@
#endif /* CONFIG_BLK_DEV_PROMISE */
static const byte ide_hwif_to_major[MAX_HWIFS] = {IDE0_MAJOR, IDE1_MAJOR, IDE2_MAJOR, IDE3_MAJOR};
-static const unsigned short default_io_base[MAX_HWIFS] = {0x1f0, 0x170, 0x1e8, 0x168};
+static unsigned short default_io_base[MAX_HWIFS] = {0x1f0, 0x170, 0x1e8, 0x168};
static const byte default_irqs[MAX_HWIFS] = {14, 15, 11, 10};
static int idebus_parameter; /* holds the "idebus=" parameter */
static int system_bus_speed; /* holds what we think is VESA/PCI bus speed */
@@ -367,6 +367,15 @@ static void set_recovery_timer (ide_hwif_t *hwif)
#endif /* DISK_RECOVERY_TIME */
+/* Called by other drivers to disable the legacy IDE driver on a given IDE base. */
+void ide_disable_base(unsigned base)
+{
+ unsigned i;
+ for (i = 0; i < MAX_HWIFS; i++)
+ if (default_io_base[i] == base)
+ default_io_base[i] = 0;
+}
+
/*
* Do not even *think* about calling this!
@@ -1927,6 +1936,7 @@ void ide_init_drive_cmd (struct request *rq)
rq->rq_status = RQ_ACTIVE;
rq->rq_dev = ????;
#endif
+ rq->quiet = 0;
}
/*
@@ -2525,7 +2535,7 @@ static inline void do_identify (ide_drive_t *drive, byte cmd)
drive->media = ide_tape;
drive->present = 1;
drive->removable = 1;
- if (drive->autotune != 2 && HWIF(drive)->dmaproc != NULL) {
+ if (drive->autotune != 2 && HWIF(drive)->dmaproc != NULL && !drive->nodma) {
if (!HWIF(drive)->dmaproc(ide_dma_check, drive))
printk(", DMA");
}
@@ -2652,7 +2662,7 @@ static inline void do_identify (ide_drive_t *drive, byte cmd)
if (drive->mult_req || ((id->multsect_valid & 1) && id->multsect))
drive->special.b.set_multmode = 1;
}
- if (drive->autotune != 2 && HWIF(drive)->dmaproc != NULL) {
+ if (drive->autotune != 2 && HWIF(drive)->dmaproc != NULL && !drive->nodma) {
if (!(HWIF(drive)->dmaproc(ide_dma_check, drive))) {
if ((id->field_valid & 4) && (id->dma_ultra & (id->dma_ultra >> 8) & 7))
printk(", UDMA");
@@ -3107,6 +3117,7 @@ static int match_parm (char *s, const char *keywords[], int vals[], int max_vals
* Not fully supported by all chipset types,
* and quite likely to cause trouble with
* older/odd IDE drives.
+ * "hdx=nodma" : disallow DMA for the drive
*
* "idebus=xx" : inform IDE driver of VESA/PCI bus speed in Mhz,
* where "xx" is between 20 and 66 inclusive,
@@ -3151,7 +3162,11 @@ void ide_setup (char *s)
ide_hwif_t *hwif;
ide_drive_t *drive;
unsigned int hw, unit;
+#ifdef MACH
+ const char max_drive = '0' + ((MAX_HWIFS * MAX_DRIVES) - 1);
+#else
const char max_drive = 'a' + ((MAX_HWIFS * MAX_DRIVES) - 1);
+#endif
const char max_hwif = '0' + (MAX_HWIFS - 1);
printk("ide_setup: %s", s);
@@ -3160,11 +3175,19 @@ void ide_setup (char *s)
/*
* Look for drive options: "hdx="
*/
+#ifdef MACH
+ if (s[0] == 'h' && s[1] == 'd' && s[2] >= '0' && s[2] <= max_drive) {
+#else
if (s[0] == 'h' && s[1] == 'd' && s[2] >= 'a' && s[2] <= max_drive) {
+#endif
const char *hd_words[] = {"none", "noprobe", "nowerr", "cdrom",
"serialize", "autotune", "noautotune",
- "slow", "ide-scsi", NULL};
+ "slow", "ide-scsi", "nodma", NULL};
+#ifdef MACH
+ unit = s[2] - '0';
+#else
unit = s[2] - 'a';
+#endif
hw = unit / MAX_DRIVES;
unit = unit % MAX_DRIVES;
hwif = &ide_hwifs[hw];
@@ -3199,6 +3222,9 @@ void ide_setup (char *s)
case -9: /* "ide-scsi" */
drive->ide_scsi = 1;
goto done;
+ case -10: /* "nodma" */
+ drive->nodma = 1;
+ goto done;
case 3: /* cyl,head,sect */
drive->media = ide_disk;
drive->cyl = drive->bios_cyl = vals[0];
diff --git a/linux/src/drivers/block/ide.h b/linux/src/drivers/block/ide.h
index edeedc97..28e371bf 100644
--- a/linux/src/drivers/block/ide.h
+++ b/linux/src/drivers/block/ide.h
@@ -344,6 +344,7 @@ typedef struct ide_drive_s {
unsigned nobios : 1; /* flag: do not probe bios for drive */
unsigned slow : 1; /* flag: slow data port */
unsigned autotune : 2; /* 1=autotune, 2=noautotune, 0=default */
+ unsigned nodma : 1; /* disk should not use dma for read/write */
#if FAKE_FDISK_FOR_EZDRIVE
unsigned remap_0_to_1 : 1; /* flag: partitioned with ezdrive */
#endif /* FAKE_FDISK_FOR_EZDRIVE */
diff --git a/linux/src/drivers/net/3c507.c b/linux/src/drivers/net/3c507.c
index 63f85a4c..58ba2d75 100644
--- a/linux/src/drivers/net/3c507.c
+++ b/linux/src/drivers/net/3c507.c
@@ -354,7 +354,7 @@ int el16_probe1(struct device *dev, int ioaddr)
dev = init_etherdev(0, sizeof(struct net_local));
if (net_debug && version_printed++ == 0)
- printk(version);
+ printk("%s", version);
printk("%s: 3c507 at %#x,", dev->name, ioaddr);
@@ -410,7 +410,7 @@ int el16_probe1(struct device *dev, int ioaddr)
dev->if_port ? "ex" : "in", dev->mem_start, dev->mem_end-1);
if (net_debug)
- printk(version);
+ printk("%s", version);
/* Initialize the device structure. */
dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
diff --git a/linux/src/drivers/net/3c509.c b/linux/src/drivers/net/3c509.c
index f8842882..727595cd 100644
--- a/linux/src/drivers/net/3c509.c
+++ b/linux/src/drivers/net/3c509.c
@@ -314,7 +314,7 @@ int el3_probe(struct device *dev)
el3_root_dev = dev;
if (el3_debug > 0)
- printk(version);
+ printk("%s", version);
/* The EL3-specific entries in the device structure. */
dev->open = &el3_open;
diff --git a/linux/src/drivers/net/3c515.c b/linux/src/drivers/net/3c515.c
index fd6ec50c..52f47032 100644
--- a/linux/src/drivers/net/3c515.c
+++ b/linux/src/drivers/net/3c515.c
@@ -404,7 +404,7 @@ init_module(void)
if (debug >= 0)
vortex_debug = debug;
if (vortex_debug)
- printk(version);
+ printk("%s", version);
root_vortex_dev = NULL;
cards_found = vortex_scan(0);
@@ -419,7 +419,7 @@ int tc515_probe(struct device *dev)
cards_found = vortex_scan(dev);
if (vortex_debug > 0 && cards_found)
- printk(version);
+ printk("%s", version);
return cards_found ? 0 : -ENODEV;
}
diff --git a/linux/src/drivers/net/ac3200.c b/linux/src/drivers/net/ac3200.c
index 0337bab7..600949fa 100644
--- a/linux/src/drivers/net/ac3200.c
+++ b/linux/src/drivers/net/ac3200.c
@@ -208,7 +208,7 @@ static int ac_probe1(int ioaddr, struct device *dev)
dev->mem_start, dev->mem_end-1);
if (ei_debug > 0)
- printk(version);
+ printk("%s", version);
ei_status.reset_8390 = &ac_reset_8390;
ei_status.block_input = &ac_block_input;
diff --git a/linux/src/drivers/net/apricot.c b/linux/src/drivers/net/apricot.c
index d106e50d..57fccafb 100644
--- a/linux/src/drivers/net/apricot.c
+++ b/linux/src/drivers/net/apricot.c
@@ -720,7 +720,7 @@ int apricot_probe(struct device *dev)
dev->irq = 10;
printk(" IRQ %d.\n", dev->irq);
- if (i596_debug > 0) printk(version);
+ if (i596_debug > 0) printk("%s", version);
/* The APRICOT-specific entries in the device structure. */
dev->open = &i596_open;
diff --git a/linux/src/drivers/net/at1700.c b/linux/src/drivers/net/at1700.c
index 9e42ab48..f4025f46 100644
--- a/linux/src/drivers/net/at1700.c
+++ b/linux/src/drivers/net/at1700.c
@@ -258,7 +258,7 @@ int at1700_probe1(struct device *dev, int ioaddr)
outb(0x00, ioaddr + CONFIG_1);
if (net_debug)
- printk(version);
+ printk("%s", version);
/* Initialize the device structure. */
dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
diff --git a/linux/src/drivers/net/de4x5.c b/linux/src/drivers/net/de4x5.c
index a66f0564..c85bcdbf 100644
--- a/linux/src/drivers/net/de4x5.c
+++ b/linux/src/drivers/net/de4x5.c
@@ -1308,7 +1308,7 @@ de4x5_hw_init(struct device *dev, u_long iobase))
}
if (de4x5_debug & DEBUG_VERSION) {
- printk(version);
+ printk("%s", version);
}
/* The DE4X5-specific entries in the device structure. */
diff --git a/linux/src/drivers/net/de600.c b/linux/src/drivers/net/de600.c
index 2488cd76..ce969422 100644
--- a/linux/src/drivers/net/de600.c
+++ b/linux/src/drivers/net/de600.c
@@ -644,7 +644,7 @@ de600_probe(struct device *dev)
printk("%s: D-Link DE-600 pocket adapter", dev->name);
/* Alpha testers must have the version number to report bugs. */
if (de600_debug > 1)
- printk(version);
+ printk("%s", version);
/* probe for adapter */
rx_page = 0;
diff --git a/linux/src/drivers/net/de620.c b/linux/src/drivers/net/de620.c
index ec639101..0e0c5522 100644
--- a/linux/src/drivers/net/de620.c
+++ b/linux/src/drivers/net/de620.c
@@ -843,7 +843,7 @@ de620_probe(struct device *dev)
dev->irq = irq;
if (de620_debug)
- printk(version);
+ printk("%s", version);
printk("D-Link DE-620 pocket adapter");
diff --git a/linux/src/drivers/net/depca.c b/linux/src/drivers/net/depca.c
index e1b03429..2048812d 100644
--- a/linux/src/drivers/net/depca.c
+++ b/linux/src/drivers/net/depca.c
@@ -649,7 +649,7 @@ depca_hw_init(struct device *dev, u_long ioaddr)
}
if (!status) {
if (depca_debug > 1) {
- printk(version);
+ printk("%s", version);
}
/* The DEPCA-specific entries in the device structure. */
diff --git a/linux/src/drivers/net/e2100.c b/linux/src/drivers/net/e2100.c
index 7ba12d31..be4185ab 100644
--- a/linux/src/drivers/net/e2100.c
+++ b/linux/src/drivers/net/e2100.c
@@ -68,7 +68,7 @@ static int e21_probe_list[] = {0x300, 0x280, 0x380, 0x220, 0};
#define E21_SAPROM 0x10 /* Offset to station address data. */
#define E21_IO_EXTENT 0x20
-extern inline void mem_on(short port, volatile char *mem_base,
+static inline void mem_on(short port, volatile char *mem_base,
unsigned char start_page )
{
/* This is a little weird: set the shared memory window by doing a
@@ -78,7 +78,7 @@ extern inline void mem_on(short port, volatile char *mem_base,
outb(E21_MEM_ON, port + E21_MEM_ENABLE + E21_MEM_ON);
}
-extern inline void mem_off(short port)
+static inline void mem_off(short port)
{
inb(port + E21_MEM_ENABLE);
outb(0x00, port + E21_MEM_ENABLE);
@@ -162,7 +162,7 @@ int e21_probe1(struct device *dev, int ioaddr)
outb(0, ioaddr + E21_ASIC); /* and disable the secondary interface. */
if (ei_debug && version_printed++ == 0)
- printk(version);
+ printk("%s", version);
/* We should have a "dev" from Space.c or the static module table. */
if (dev == NULL) {
diff --git a/linux/src/drivers/net/eepro.c b/linux/src/drivers/net/eepro.c
index 2c3a6b26..3d4fc578 100644
--- a/linux/src/drivers/net/eepro.c
+++ b/linux/src/drivers/net/eepro.c
@@ -498,7 +498,7 @@ eepro_probe1(struct device *dev, short ioaddr)
}
if (net_debug)
- printk(version);
+ printk("%s", version);
/* Grab the region so we can find another board if autoIRQ fails. */
request_region(ioaddr, EEPRO_IO_EXTENT, "eepro");
diff --git a/linux/src/drivers/net/eepro100.c b/linux/src/drivers/net/eepro100.c
index 6909cdc4..d03462cd 100644
--- a/linux/src/drivers/net/eepro100.c
+++ b/linux/src/drivers/net/eepro100.c
@@ -726,7 +726,7 @@ static void *speedo_found1(struct pci_dev *pdev, void *init_dev,
eeprom[8], eeprom[9]>>8, eeprom[9] & 0xff);
for (i = 0; i < 4; i++)
if (eeprom[5] & (1<<i))
- printk(connectors[i]);
+ printk("%s", connectors[i]);
printk("\n"KERN_INFO" Primary interface chip %s PHY #%d.\n",
phys[(eeprom[6]>>8)&15], eeprom[6] & 0x1f);
if (eeprom[7] & 0x0700)
diff --git a/linux/src/drivers/net/eexpress.c b/linux/src/drivers/net/eexpress.c
index d7065509..9c816ee9 100644
--- a/linux/src/drivers/net/eexpress.c
+++ b/linux/src/drivers/net/eexpress.c
@@ -794,7 +794,7 @@ static int eexp_hw_probe(struct device *dev, unsigned short ioaddr)
}
if (net_debug)
- printk(version);
+ printk("%s", version);
dev->open = eexp_open;
dev->stop = eexp_close;
dev->hard_start_xmit = eexp_xmit;
diff --git a/linux/src/drivers/net/ewrk3.c b/linux/src/drivers/net/ewrk3.c
index f91315ff..07b0f13f 100644
--- a/linux/src/drivers/net/ewrk3.c
+++ b/linux/src/drivers/net/ewrk3.c
@@ -589,7 +589,7 @@ ewrk3_hw_init(struct device *dev, u_long iobase)
if (!status) {
if (ewrk3_debug > 1) {
- printk(version);
+ printk("%s", version);
}
/* The EWRK3-specific entries in the device structure. */
diff --git a/linux/src/drivers/net/fmv18x.c b/linux/src/drivers/net/fmv18x.c
index 121dd0bb..b29ddf00 100644
--- a/linux/src/drivers/net/fmv18x.c
+++ b/linux/src/drivers/net/fmv18x.c
@@ -249,7 +249,7 @@ int fmv18x_probe1(struct device *dev, short ioaddr)
outb(dev->if_port, ioaddr + MODE13);
if (net_debug)
- printk(version);
+ printk("%s", version);
/* Initialize the device structure. */
dev->priv = kmalloc(sizeof(struct net_local), GFP_KERNEL);
diff --git a/linux/src/drivers/net/hp-plus.c b/linux/src/drivers/net/hp-plus.c
index c8e36111..c2b71169 100644
--- a/linux/src/drivers/net/hp-plus.c
+++ b/linux/src/drivers/net/hp-plus.c
@@ -164,7 +164,7 @@ int hpp_probe1(struct device *dev, int ioaddr)
}
if (ei_debug && version_printed++ == 0)
- printk(version);
+ printk("%s", version);
printk("%s: %s at %#3x,", dev->name, name, ioaddr);
diff --git a/linux/src/drivers/net/hp.c b/linux/src/drivers/net/hp.c
index 741924f9..6ddbfd2e 100644
--- a/linux/src/drivers/net/hp.c
+++ b/linux/src/drivers/net/hp.c
@@ -136,7 +136,7 @@ int hp_probe1(struct device *dev, int ioaddr)
}
if (ei_debug && version_printed++ == 0)
- printk(version);
+ printk("%s", version);
printk("%s: %s (ID %02x) at %#3x,", dev->name, name, board_id, ioaddr);
diff --git a/linux/src/drivers/net/lance.c b/linux/src/drivers/net/lance.c
index f64f0fee..fe3cf687 100644
--- a/linux/src/drivers/net/lance.c
+++ b/linux/src/drivers/net/lance.c
@@ -674,7 +674,7 @@ int lance_probe1(struct device *dev, int ioaddr, int irq, int options)
}
if (lance_debug > 0 && did_version++ == 0)
- printk(version);
+ printk("%s", version);
/* The LANCE-specific entries in the device structure. */
dev->open = lance_open;
diff --git a/linux/src/drivers/net/ne.c b/linux/src/drivers/net/ne.c
index 825b768e..ea2f9290 100644
--- a/linux/src/drivers/net/ne.c
+++ b/linux/src/drivers/net/ne.c
@@ -291,7 +291,7 @@ static int ne_probe1(struct device *dev, int ioaddr)
}
if (ei_debug && version_printed++ == 0)
- printk(version);
+ printk("%s", version);
printk("NE*000 ethercard probe at %#3x:", ioaddr);
diff --git a/linux/src/drivers/net/pci-scan.c b/linux/src/drivers/net/pci-scan.c
index 60525b76..ffb7b128 100644
--- a/linux/src/drivers/net/pci-scan.c
+++ b/linux/src/drivers/net/pci-scan.c
@@ -31,7 +31,7 @@ static int min_pci_latency = 32;
#if ! defined(__KERNEL__)
#define __KERNEL__ 1
#endif
-#if !defined(__OPTIMIZE__)
+#if !defined(__OPTIMIZE__) && /* Mach glue, we think this is ok now: */ 0
#warning You must compile this file with the correct options!
#warning See the last lines of the source file.
#error You must compile this driver with the proper options, including "-O".
diff --git a/linux/src/drivers/net/pcnet32.c b/linux/src/drivers/net/pcnet32.c
index 02e70982..da0e8709 100644
--- a/linux/src/drivers/net/pcnet32.c
+++ b/linux/src/drivers/net/pcnet32.c
@@ -344,7 +344,7 @@ static int pcnet32_probe1(struct device *dev, unsigned int ioaddr, unsigned char
dev->irq = irq_line;
if (pcnet32_debug > 0)
- printk(version);
+ printk("%s", version);
/* The PCNET32-specific entries in the device structure. */
dev->open = &pcnet32_open;
diff --git a/linux/src/drivers/net/seeq8005.c b/linux/src/drivers/net/seeq8005.c
index c4d48521..4adebdea 100644
--- a/linux/src/drivers/net/seeq8005.c
+++ b/linux/src/drivers/net/seeq8005.c
@@ -274,7 +274,7 @@ static int seeq8005_probe1(struct device *dev, int ioaddr)
dev = init_etherdev(0, sizeof(struct net_local));
if (net_debug && version_printed++ == 0)
- printk(version);
+ printk("%s", version);
printk("%s: %s found at %#3x, ", dev->name, "seeq8005", ioaddr);
diff --git a/linux/src/drivers/net/smc-ultra.c b/linux/src/drivers/net/smc-ultra.c
index 074a235b..f593aeb6 100644
--- a/linux/src/drivers/net/smc-ultra.c
+++ b/linux/src/drivers/net/smc-ultra.c
@@ -156,7 +156,7 @@ int ultra_probe1(struct device *dev, int ioaddr)
dev = init_etherdev(0, 0);
if (ei_debug && version_printed++ == 0)
- printk(version);
+ printk("%s", version);
model_name = (idreg & 0xF0) == 0x20 ? "SMC Ultra" : "SMC EtherEZ";
diff --git a/linux/src/drivers/net/smc-ultra32.c b/linux/src/drivers/net/smc-ultra32.c
index f616e259..6cde4c27 100644
--- a/linux/src/drivers/net/smc-ultra32.c
+++ b/linux/src/drivers/net/smc-ultra32.c
@@ -153,7 +153,7 @@ int ultra32_probe1(struct device *dev, int ioaddr)
}
if (ei_debug && version_printed++ == 0)
- printk(version);
+ printk("%s", version);
model_name = "SMC Ultra32";
diff --git a/linux/src/drivers/net/sundance.c b/linux/src/drivers/net/sundance.c
index 47f32ebd..37231644 100644
--- a/linux/src/drivers/net/sundance.c
+++ b/linux/src/drivers/net/sundance.c
@@ -986,7 +986,7 @@ static int start_tx(struct sk_buff *skb, struct net_device *dev)
dev->trans_start = jiffies;
if (np->msg_level & NETIF_MSG_TX_QUEUED) {
- printk(KERN_DEBUG "%s: Transmit frame #%d len %ld queued in slot %ld.\n",
+ printk(KERN_DEBUG "%s: Transmit frame #%d len %ld queued in slot %u.\n",
dev->name, np->cur_tx, skb->len, entry);
}
return 0;
diff --git a/linux/src/drivers/net/tlan.c b/linux/src/drivers/net/tlan.c
index 11e12bbc..fedc11f3 100644
--- a/linux/src/drivers/net/tlan.c
+++ b/linux/src/drivers/net/tlan.c
@@ -1132,7 +1132,7 @@ u32 TLan_HandleTxEOF( struct device *dev, u16 host_int )
if ( head_list->cStat & TLAN_CSTAT_EOC )
eoc = 1;
- if ( ! head_list->cStat & TLAN_CSTAT_FRM_CMP ) {
+ if (!(head_list->cStat & TLAN_CSTAT_FRM_CMP)) {
printk( "TLAN: Received interrupt for uncompleted TX frame.\n" );
}
@@ -1244,7 +1244,7 @@ u32 TLan_HandleRxEOF( struct device *dev, u16 host_int )
eoc = 1;
}
- if ( ! head_list->cStat & TLAN_CSTAT_FRM_CMP ) {
+ if (!(head_list->cStat & TLAN_CSTAT_FRM_CMP)) {
printk( "TLAN: Received interrupt for uncompleted RX frame.\n" );
} else if ( bbuf ) {
skb = dev_alloc_skb( head_list->frameSize + 7 );
diff --git a/linux/src/drivers/net/wd.c b/linux/src/drivers/net/wd.c
index a737a01d..dd879021 100644
--- a/linux/src/drivers/net/wd.c
+++ b/linux/src/drivers/net/wd.c
@@ -137,7 +137,7 @@ int wd_probe1(struct device *dev, int ioaddr)
}
if (ei_debug && version_printed++ == 0)
- printk(version);
+ printk("%s", version);
printk("%s: WD80x3 at %#3x, ", dev->name, ioaddr);
for (i = 0; i < 6; i++)
diff --git a/linux/src/drivers/scsi/AM53C974.c b/linux/src/drivers/scsi/AM53C974.c
index 5178ccf7..da139ced 100644
--- a/linux/src/drivers/scsi/AM53C974.c
+++ b/linux/src/drivers/scsi/AM53C974.c
@@ -1919,7 +1919,7 @@ if ((statreg & STATREG_PHASE) != PHASE_MSGIN) {
goto EXIT_ABORT; }
msg[0] = AM53C974_read_8(FFREG);
-if (!msg[0] & 0x80) {
+if (!(msg[0] & 0x80)) {
printk("scsi%d: error: expecting IDENTIFY message, got ", instance->host_no);
print_msg(msg);
hostdata->aborted = 1;
diff --git a/linux/src/drivers/scsi/FlashPoint.c b/linux/src/drivers/scsi/FlashPoint.c
index aae35c03..8d2f1020 100644
--- a/linux/src/drivers/scsi/FlashPoint.c
+++ b/linux/src/drivers/scsi/FlashPoint.c
@@ -3756,17 +3756,17 @@ STATIC int SetDevSyncRate(PSCCBcard pCurrCard, PUCB p_ucb)
}
if(currTar_Info->TarEEValue && EE_SYNC_MASK == syncVal)
return(0);
- currTar_Info->TarEEValue = (currTar_Info->TarEEValue & !EE_SYNC_MASK)
+ currTar_Info->TarEEValue = (!(EE_SYNC_MASK & currTar_Info->TarEEValue))
| syncVal;
syncOffset = (SYNC_RATE_TBL + scsiID) / 2;
temp2.tempw = utilEERead(ioPort, syncOffset);
if(scsiID & 0x01)
{
- temp2.tempb[0] = (temp2.tempb[0] & !EE_SYNC_MASK) | syncVal;
+ temp2.tempb[0] = (!(EE_SYNC_MASK & temp2.tempb[0])) | syncVal;
}
else
{
- temp2.tempb[1] = (temp2.tempb[1] & !EE_SYNC_MASK) | syncVal;
+ temp2.tempb[1] = (!(EE_SYNC_MASK & temp2.tempb[1])) | syncVal;
}
utilEEWriteOnOff(ioPort, 1);
utilEEWrite(ioPort, temp2.tempw, syncOffset);
@@ -3845,7 +3845,7 @@ int SetDevWideMode(PSCCBcard pCurrCard,PUCB p_ucb)
}
else
{
- if(!currTar_Info->TarEEValue & EE_WIDE_SCSI)
+ if(!(currTar_Info->TarEEValue & EE_WIDE_SCSI))
{
return(0);
}
@@ -3854,18 +3854,18 @@ int SetDevWideMode(PSCCBcard pCurrCard,PUCB p_ucb)
scsiWideMode = 0;
}
}
- currTar_Info->TarEEValue = (currTar_Info->TarEEValue & !EE_WIDE_SCSI)
+ currTar_Info->TarEEValue = (!(EE_WIDE_SCSI & currTar_Info->TarEEValue))
| scsiWideMode;
syncOffset = (SYNC_RATE_TBL + scsiID) / 2;
temp2.tempw = utilEERead(ioPort, syncOffset);
if(scsiID & 0x01)
{
- temp2.tempb[0] = (temp2.tempb[0] & !EE_WIDE_SCSI) | scsiWideMode;
+ temp2.tempb[0] = (!(EE_WIDE_SCSI & temp2.tempb[0])) | scsiWideMode;
}
else
{
- temp2.tempb[1] = (temp2.tempb[1] & !EE_WIDE_SCSI) | scsiWideMode;
+ temp2.tempb[1] = (!(EE_WIDE_SCSI & temp2.tempb[1])) | scsiWideMode;
}
utilEEWriteOnOff(ioPort, 1);
utilEEWrite(ioPort, temp2.tempw, syncOffset);
diff --git a/linux/src/drivers/scsi/NCR5380.c b/linux/src/drivers/scsi/NCR5380.c
index 295f2ad2..4f085e9b 100644
--- a/linux/src/drivers/scsi/NCR5380.c
+++ b/linux/src/drivers/scsi/NCR5380.c
@@ -1949,7 +1949,7 @@ static int do_abort (struct Scsi_Host *host) {
* the target sees, so we just handshake.
*/
- while (!(tmp = NCR5380_read(STATUS_REG)) & SR_REQ);
+ while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ));
NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp));
@@ -2900,7 +2900,7 @@ static void NCR5380_reselect (struct Scsi_Host *instance) {
NCR5380_transfer_pio(instance, &phase, &len, &data);
- if (!msg[0] & 0x80) {
+ if (!(msg[0] & 0x80)) {
printk("scsi%d : expecting IDENTIFY message, got ",
instance->host_no);
print_msg(msg);
diff --git a/linux/src/drivers/scsi/advansys.c b/linux/src/drivers/scsi/advansys.c
index ef61fac9..7aea67c8 100644
--- a/linux/src/drivers/scsi/advansys.c
+++ b/linux/src/drivers/scsi/advansys.c
@@ -4715,7 +4715,6 @@ advansys_detect(Scsi_Host_Template *tpnt)
ep->adapter_info[3] = asc_dvc_varp->cfg->adapter_info[3];
ep->adapter_info[4] = asc_dvc_varp->cfg->adapter_info[4];
ep->adapter_info[5] = asc_dvc_varp->cfg->adapter_info[5];
- ep->adapter_info[6] = asc_dvc_varp->cfg->adapter_info[6];
/*
* Modify board configuration.
diff --git a/linux/src/drivers/scsi/t128.c b/linux/src/drivers/scsi/t128.c
index d4c7452b..198e910b 100644
--- a/linux/src/drivers/scsi/t128.c
+++ b/linux/src/drivers/scsi/t128.c
@@ -327,7 +327,7 @@ static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst,
for (; i; --i) {
while (!(instance->base[T_STATUS_REG_OFFSET]) & T_ST_RDY) barrier();
#else
- while (!(instance->base[T_STATUS_REG_OFFSET]) & T_ST_RDY) barrier();
+ while (!((instance->base[T_STATUS_REG_OFFSET]) & T_ST_RDY)) barrier();
for (; i; --i) {
#endif
*d++ = *reg;
@@ -370,7 +370,7 @@ static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src
for (; i; --i) {
while (!(instance->base[T_STATUS_REG_OFFSET]) & T_ST_RDY) barrier();
#else
- while (!(instance->base[T_STATUS_REG_OFFSET]) & T_ST_RDY) barrier();
+ while (!((instance->base[T_STATUS_REG_OFFSET]) & T_ST_RDY)) barrier();
for (; i; --i) {
#endif
*reg = *s++;
diff --git a/linux/src/include/asm-i386/bitops.h b/linux/src/include/asm-i386/bitops.h
index fc4cf192..e2a4c14a 100644
--- a/linux/src/include/asm-i386/bitops.h
+++ b/linux/src/include/asm-i386/bitops.h
@@ -28,7 +28,7 @@ struct __dummy { unsigned long a[100]; };
#define ADDR (*(struct __dummy *) addr)
#define CONST_ADDR (*(const struct __dummy *) addr)
-extern __inline__ int set_bit(int nr, SMPVOL void * addr)
+static __inline__ int set_bit(int nr, SMPVOL void * addr)
{
int oldbit;
@@ -39,7 +39,7 @@ extern __inline__ int set_bit(int nr, SMPVOL void * addr)
return oldbit;
}
-extern __inline__ int clear_bit(int nr, SMPVOL void * addr)
+static __inline__ int clear_bit(int nr, SMPVOL void * addr)
{
int oldbit;
@@ -50,7 +50,7 @@ extern __inline__ int clear_bit(int nr, SMPVOL void * addr)
return oldbit;
}
-extern __inline__ int change_bit(int nr, SMPVOL void * addr)
+static __inline__ int change_bit(int nr, SMPVOL void * addr)
{
int oldbit;
@@ -61,7 +61,7 @@ extern __inline__ int change_bit(int nr, SMPVOL void * addr)
return oldbit;
}
-extern __inline__ int test_and_set_bit(int nr, volatile void * addr)
+static __inline__ int test_and_set_bit(int nr, volatile void * addr)
{
int oldbit;
@@ -72,7 +72,7 @@ extern __inline__ int test_and_set_bit(int nr, volatile void * addr)
return oldbit;
}
-extern __inline__ int test_and_clear_bit(int nr, volatile void * addr)
+static __inline__ int test_and_clear_bit(int nr, volatile void * addr)
{
int oldbit;
@@ -83,7 +83,7 @@ extern __inline__ int test_and_clear_bit(int nr, volatile void * addr)
return oldbit;
}
-extern __inline__ int test_and_change_bit(int nr, volatile void * addr)
+static __inline__ int test_and_change_bit(int nr, volatile void * addr)
{
int oldbit;
@@ -98,7 +98,7 @@ extern __inline__ int test_and_change_bit(int nr, volatile void * addr)
/*
* This routine doesn't need to be atomic.
*/
-extern __inline__ int test_bit(int nr, const SMPVOL void * addr)
+static __inline__ int test_bit(int nr, const SMPVOL void * addr)
{
return ((1UL << (nr & 31)) & (((const unsigned int *) addr)[nr >> 5])) != 0;
}
@@ -106,8 +106,9 @@ extern __inline__ int test_bit(int nr, const SMPVOL void * addr)
/*
* Find-bit routines..
*/
-extern __inline__ int find_first_zero_bit(void * addr, unsigned size)
+static __inline__ int find_first_zero_bit(void * addr, unsigned size)
{
+ int d0, d1, d2;
int res;
if (!size)
@@ -123,13 +124,12 @@ extern __inline__ int find_first_zero_bit(void * addr, unsigned size)
"1:\tsubl %%ebx,%%edi\n\t"
"shll $3,%%edi\n\t"
"addl %%edi,%%edx"
- :"=d" (res)
- :"c" ((size + 31) >> 5), "D" (addr), "b" (addr)
- :"ax", "cx", "di");
+ :"=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
+ :"1" ((size + 31) >> 5), "2" (addr), "b" (addr));
return res;
}
-extern __inline__ int find_next_zero_bit (void * addr, int size, int offset)
+static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
{
unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
int set = 0, bit = offset & 31, res;
@@ -160,7 +160,7 @@ extern __inline__ int find_next_zero_bit (void * addr, int size, int offset)
* ffz = Find First Zero in word. Undefined if no zero exists,
* so code should check against ~0UL first..
*/
-extern __inline__ unsigned long ffz(unsigned long word)
+static __inline__ unsigned long ffz(unsigned long word)
{
__asm__("bsfl %1,%0"
:"=r" (word)
@@ -176,7 +176,7 @@ extern __inline__ unsigned long ffz(unsigned long word)
* differs in spirit from the above ffz (man ffs).
*/
-extern __inline__ int ffs(int x)
+static __inline__ int ffs(int x)
{
int r;
diff --git a/linux/src/include/asm-i386/io.h b/linux/src/include/asm-i386/io.h
index f961f1d2..34cf105b 100644
--- a/linux/src/include/asm-i386/io.h
+++ b/linux/src/include/asm-i386/io.h
@@ -45,12 +45,12 @@
* make the kernel segment mapped at 0, we need to do translation
* on the i386 as well)
*/
-extern inline unsigned long virt_to_phys(volatile void * address)
+static inline unsigned long virt_to_phys(volatile void * address)
{
return (unsigned long) _kvtophys(address);
}
-extern inline void * phys_to_virt(unsigned long address)
+static inline void * phys_to_virt(unsigned long address)
{
return (void *) phystokv(address);
}
@@ -90,7 +90,7 @@ extern inline void * phys_to_virt(unsigned long address)
*/
#define __OUT1(s,x) \
-extern inline void __out##s(unsigned x value, unsigned short port) {
+static inline void __out##s(unsigned x value, unsigned short port) {
#define __OUT2(s,s1,s2) \
__asm__ __volatile__ ("out" #s " %" s1 "0,%" s2 "1"
@@ -102,7 +102,7 @@ __OUT1(s##_p,x) __OUT2(s,s1,"w") : : "a" (value), "d" (port)); SLOW_DOWN_IO; } \
__OUT1(s##c_p,x) __OUT2(s,s1,"") : : "a" (value), "id" (port)); SLOW_DOWN_IO; }
#define __IN1(s) \
-extern inline RETURN_TYPE __in##s(unsigned short port) { RETURN_TYPE _v;
+static inline RETURN_TYPE __in##s(unsigned short port) { RETURN_TYPE _v;
#define __IN2(s,s1,s2) \
__asm__ __volatile__ ("in" #s " %" s2 "1,%" s1 "0"
@@ -114,12 +114,12 @@ __IN1(s##_p) __IN2(s,s1,"w") : "=a" (_v) : "d" (port) ,##i ); SLOW_DOWN_IO; retu
__IN1(s##c_p) __IN2(s,s1,"") : "=a" (_v) : "id" (port) ,##i ); SLOW_DOWN_IO; return _v; }
#define __INS(s) \
-extern inline void ins##s(unsigned short port, void * addr, unsigned long count) \
+static inline void ins##s(unsigned short port, void * addr, unsigned long count) \
{ __asm__ __volatile__ ("cld ; rep ; ins" #s \
: "=D" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
#define __OUTS(s) \
-extern inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
+static inline void outs##s(unsigned short port, const void * addr, unsigned long count) \
{ __asm__ __volatile__ ("cld ; rep ; outs" #s \
: "=S" (addr), "=c" (count) : "d" (port),"0" (addr),"1" (count)); }
diff --git a/linux/src/include/asm-i386/segment.h b/linux/src/include/asm-i386/segment.h
index 5f8af993..d23aa173 100644
--- a/linux/src/include/asm-i386/segment.h
+++ b/linux/src/include/asm-i386/segment.h
@@ -60,7 +60,11 @@ static inline void __attribute__((always_inline)) __put_user(unsigned long x, vo
:"ir" (x), "m" (*__sd(y)));
break;
default:
+#ifdef __OPTIMIZE__
bad_user_access_length();
+#else
+ asm volatile("ud2");
+#endif
}
}
@@ -85,7 +89,11 @@ static inline unsigned long __attribute__((always_inline)) __get_user(const void
:"m" (*__const_sd(y)));
return result;
default:
+#ifdef __OPTIMIZE__
return bad_user_access_length();
+#else
+ asm volatile("ud2");
+#endif
}
}
diff --git a/linux/src/include/asm-i386/semaphore.h b/linux/src/include/asm-i386/semaphore.h
index 1486d1c1..18e12c10 100644
--- a/linux/src/include/asm-i386/semaphore.h
+++ b/linux/src/include/asm-i386/semaphore.h
@@ -30,6 +30,10 @@ struct semaphore {
#define MUTEX ((struct semaphore) { 1, 0, 0, NULL })
#define MUTEX_LOCKED ((struct semaphore) { 0, 0, 0, NULL })
+/* Special register calling convention:
+ * eax contains return address
+ * ecx contains semaphore address
+ */
asmlinkage void down_failed(void /* special register calling convention */);
asmlinkage void up_wakeup(void /* special register calling convention */);
@@ -41,20 +45,21 @@ extern void __up(struct semaphore * sem);
* "down_failed" is a special asm handler that calls the C
* routine that actually waits. See arch/i386/lib/semaphore.S
*/
-extern inline void down(struct semaphore * sem)
+static inline void down(struct semaphore * sem)
{
+ int d0;
__asm__ __volatile__(
"# atomic down operation\n\t"
"movl $1f,%%eax\n\t"
#ifdef __SMP__
"lock ; "
#endif
- "decl 0(%0)\n\t"
+ "decl %1\n\t"
"js " SYMBOL_NAME_STR(down_failed) "\n"
"1:\n"
- :/* no outputs */
+ :"=&a" (d0), "=m" (sem->count)
:"c" (sem)
- :"ax","dx","memory");
+ :"memory");
}
/*
@@ -81,7 +86,7 @@ asmlinkage int down_failed_interruptible(void); /* params in registers */
* process can be killed. The down_failed_interruptible routine
* returns negative for signalled and zero for semaphore acquired.
*/
-extern inline int down_interruptible(struct semaphore * sem)
+static inline int down_interruptible(struct semaphore * sem)
{
int ret ;
@@ -91,13 +96,13 @@ extern inline int down_interruptible(struct semaphore * sem)
#ifdef __SMP__
"lock ; "
#endif
- "decl 0(%1)\n\t"
+ "decl %1\n\t"
"js " SYMBOL_NAME_STR(down_failed_interruptible) "\n\t"
"xorl %%eax,%%eax\n"
"2:\n"
- :"=a" (ret)
+ :"=&a" (ret), "=m" (sem->count)
:"c" (sem)
- :"ax","dx","memory");
+ :"memory");
return(ret) ;
}
@@ -108,20 +113,21 @@ extern inline int down_interruptible(struct semaphore * sem)
* The default case (no contention) will result in NO
* jumps for both down() and up().
*/
-extern inline void up(struct semaphore * sem)
+static inline void up(struct semaphore * sem)
{
+ int d0;
__asm__ __volatile__(
"# atomic up operation\n\t"
"movl $1f,%%eax\n\t"
#ifdef __SMP__
"lock ; "
#endif
- "incl 0(%0)\n\t"
+ "incl %1\n\t"
"jle " SYMBOL_NAME_STR(up_wakeup)
"\n1:"
- :/* no outputs */
+ :"=&a" (d0), "=m" (sem->count)
:"c" (sem)
- :"ax", "dx", "memory");
+ :"memory");
}
#endif
diff --git a/linux/src/include/asm-i386/termios.h b/linux/src/include/asm-i386/termios.h
index 76551dea..9f65b4d6 100644
--- a/linux/src/include/asm-i386/termios.h
+++ b/linux/src/include/asm-i386/termios.h
@@ -61,7 +61,7 @@ struct termio {
/*
* Translate a "termio" structure into a "termios". Ugh.
*/
-extern inline void trans_from_termio(struct termio * termio,
+static inline void trans_from_termio(struct termio * termio,
struct termios * termios)
{
#define SET_LOW_BITS(x,y) (*(unsigned short *)(&x) = (y))
@@ -76,7 +76,7 @@ extern inline void trans_from_termio(struct termio * termio,
/*
* Translate a "termios" structure into a "termio". Ugh.
*/
-extern inline void trans_to_termio(struct termios * termios,
+static inline void trans_to_termio(struct termios * termios,
struct termio * termio)
{
termio->c_iflag = termios->c_iflag;
diff --git a/linux/src/include/linux/compiler-gcc.h b/linux/src/include/linux/compiler-gcc.h
index 59e4028e..b1a0be0c 100644
--- a/linux/src/include/linux/compiler-gcc.h
+++ b/linux/src/include/linux/compiler-gcc.h
@@ -9,7 +9,9 @@
/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
+#ifndef barrier
#define barrier() __asm__ __volatile__("": : :"memory")
+#endif /* barrier */
/*
* This macro obfuscates arithmetic on a variable address so that gcc
diff --git a/linux/src/include/linux/compiler-gcc5.h b/linux/src/include/linux/compiler-gcc5.h
new file mode 100644
index 00000000..efee4937
--- /dev/null
+++ b/linux/src/include/linux/compiler-gcc5.h
@@ -0,0 +1,67 @@
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-gcc5.h> directly, include <linux/compiler.h> instead."
+#endif
+
+#define __used __attribute__((__used__))
+#define __must_check __attribute__((warn_unused_result))
+#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
+
+/* Mark functions as cold. gcc will assume any path leading to a call
+ to them will be unlikely. This means a lot of manual unlikely()s
+ are unnecessary now for any paths leading to the usual suspects
+ like BUG(), printk(), panic() etc. [but let's keep them for now for
+ older compilers]
+
+ Early snapshots of gcc 4.3 don't support this and we can't detect this
+ in the preprocessor, but we can live with this because they're unreleased.
+ Maketime probing would be overkill here.
+
+ gcc also has a __attribute__((__hot__)) to move hot functions into
+ a special section, but I don't see any sense in this right now in
+ the kernel context */
+#define __cold __attribute__((__cold__))
+
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+#ifndef __CHECKER__
+# define __compiletime_warning(message) __attribute__((warning(message)))
+# define __compiletime_error(message) __attribute__((error(message)))
+#endif /* __CHECKER__ */
+
+/*
+ * Mark a position in code as unreachable. This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
+ *
+ * Early snapshots of gcc 4.5 don't support this and we can't detect
+ * this in the preprocessor, but we can live with this because they're
+ * unreleased. Really, we need to have autoconf for the kernel.
+ */
+#define unreachable() __builtin_unreachable()
+
+/* Mark a function definition as prohibited from being cloned. */
+#define __noclone __attribute__((__noclone__))
+
+/*
+ * Tell the optimizer that something else uses this function or variable.
+ */
+#define __visible __attribute__((externally_visible))
+
+/*
+ * GCC 'asm goto' miscompiles certain code sequences:
+ *
+ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+ *
+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
+
+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#define __HAVE_BUILTIN_BSWAP16__
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+
+#define KASAN_ABI_VERSION 4
diff --git a/linux/src/include/linux/compiler-gcc6.h b/linux/src/include/linux/compiler-gcc6.h
new file mode 100644
index 00000000..cc2e86a9
--- /dev/null
+++ b/linux/src/include/linux/compiler-gcc6.h
@@ -0,0 +1,67 @@
+#ifndef __LINUX_COMPILER_H
+#error "Please don't include <linux/compiler-gcc6.h> directly, include <linux/compiler.h> instead."
+#endif
+
+#define __used __attribute__((__used__))
+#define __must_check __attribute__((warn_unused_result))
+#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
+
+/* Mark functions as cold. gcc will assume any path leading to a call
+ to them will be unlikely. This means a lot of manual unlikely()s
+ are unnecessary now for any paths leading to the usual suspects
+ like BUG(), printk(), panic() etc. [but let's keep them for now for
+ older compilers]
+
+ Early snapshots of gcc 4.3 don't support this and we can't detect this
+ in the preprocessor, but we can live with this because they're unreleased.
+ Maketime probing would be overkill here.
+
+ gcc also has a __attribute__((__hot__)) to move hot functions into
+ a special section, but I don't see any sense in this right now in
+ the kernel context */
+#define __cold __attribute__((__cold__))
+
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
+
+#ifndef __CHECKER__
+# define __compiletime_warning(message) __attribute__((warning(message)))
+# define __compiletime_error(message) __attribute__((error(message)))
+#endif /* __CHECKER__ */
+
+/*
+ * Mark a position in code as unreachable. This can be used to
+ * suppress control flow warnings after asm blocks that transfer
+ * control elsewhere.
+ *
+ * Early snapshots of gcc 4.5 don't support this and we can't detect
+ * this in the preprocessor, but we can live with this because they're
+ * unreleased. Really, we need to have autoconf for the kernel.
+ */
+#define unreachable() __builtin_unreachable()
+
+/* Mark a function definition as prohibited from being cloned. */
+#define __noclone __attribute__((__noclone__))
+
+/*
+ * Tell the optimizer that something else uses this function or variable.
+ */
+#define __visible __attribute__((externally_visible))
+
+/*
+ * GCC 'asm goto' miscompiles certain code sequences:
+ *
+ * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
+ *
+ * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
+ *
+ * (asm goto is automatically volatile - the naming reflects this.)
+ */
+#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
+
+#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
+#define __HAVE_BUILTIN_BSWAP32__
+#define __HAVE_BUILTIN_BSWAP64__
+#define __HAVE_BUILTIN_BSWAP16__
+#endif /* CONFIG_ARCH_USE_BUILTIN_BSWAP */
+
+#define KASAN_ABI_VERSION 4
diff --git a/linux/src/include/linux/compiler.h b/linux/src/include/linux/compiler.h
index 320d6c94..eb3dd949 100644
--- a/linux/src/include/linux/compiler.h
+++ b/linux/src/include/linux/compiler.h
@@ -143,8 +143,12 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
#endif /* CONFIG_PROFILE_ALL_BRANCHES */
#else
-# define likely(x) __builtin_expect(!!(x), 1)
-# define unlikely(x) __builtin_expect(!!(x), 0)
+# ifndef likely
+# define likely(x) __builtin_expect(!!(x), 1)
+# endif /* likely */
+# ifndef unlikely
+# define unlikely(x) __builtin_expect(!!(x), 0)
+# endif /* unlikely */
#endif
/* Optimization barrier */
diff --git a/linux/src/include/linux/interrupt.h b/linux/src/include/linux/interrupt.h
index 5765260d..02244756 100644
--- a/linux/src/include/linux/interrupt.h
+++ b/linux/src/include/linux/interrupt.h
@@ -43,14 +43,14 @@ enum {
ISICOM_BH
};
-extern inline void init_bh(int nr, void (*routine)(void))
+static inline void init_bh(int nr, void (*routine)(void))
{
bh_base[nr] = routine;
bh_mask_count[nr] = 0;
bh_mask |= 1 << nr;
}
-extern inline void mark_bh(int nr)
+static inline void mark_bh(int nr)
{
set_bit(nr, &bh_active);
}
@@ -59,13 +59,13 @@ extern inline void mark_bh(int nr)
* These use a mask count to correctly handle
* nested disable/enable calls
*/
-extern inline void disable_bh(int nr)
+static inline void disable_bh(int nr)
{
bh_mask &= ~(1 << nr);
bh_mask_count[nr]++;
}
-extern inline void enable_bh(int nr)
+static inline void enable_bh(int nr)
{
if (!--bh_mask_count[nr])
bh_mask |= 1 << nr;
@@ -75,13 +75,13 @@ extern inline void enable_bh(int nr)
* start_bh_atomic/end_bh_atomic also nest
* naturally by using a counter
*/
-extern inline void start_bh_atomic(void)
+static inline void start_bh_atomic(void)
{
intr_count++;
barrier();
}
-extern inline void end_bh_atomic(void)
+static inline void end_bh_atomic(void)
{
barrier();
intr_count--;
diff --git a/linux/src/include/linux/string.h b/linux/src/include/linux/string.h
index 214503c2..62ff8802 100644
--- a/linux/src/include/linux/string.h
+++ b/linux/src/include/linux/string.h
@@ -12,25 +12,33 @@ extern "C" {
#endif
extern char * ___strtok;
+#if 0
extern char * strcpy(char *,const char *);
extern char * strncpy(char *,const char *, __kernel_size_t);
extern char * strcat(char *, const char *);
extern char * strncat(char *, const char *, __kernel_size_t);
extern char * strchr(const char *,int);
extern char * strrchr(const char *,int);
+#endif
extern char * strpbrk(const char *,const char *);
extern char * strtok(char *,const char *);
extern char * strstr(const char *,const char *);
+#if 0
extern __kernel_size_t strlen(const char *);
extern __kernel_size_t strnlen(const char *,__kernel_size_t);
+#endif
extern __kernel_size_t strspn(const char *,const char *);
+#if 0
extern int strcmp(const char *,const char *);
extern int strncmp(const char *,const char *,__kernel_size_t);
+#endif
extern void * memset(void *,int,__kernel_size_t);
extern void * memcpy(void *,const void *,__kernel_size_t);
+#if 0
extern void * memmove(void *,const void *,__kernel_size_t);
extern void * memscan(void *,int,__kernel_size_t);
+#endif
extern int memcmp(const void *,const void *,__kernel_size_t);
/*
diff --git a/linux/src/include/net/route.h b/linux/src/include/net/route.h
index 7bf32d0a..2af1a419 100644
--- a/linux/src/include/net/route.h
+++ b/linux/src/include/net/route.h
@@ -105,30 +105,30 @@ extern unsigned ip_rt_bh_mask;
extern struct rtable *ip_rt_hash_table[RT_HASH_DIVISOR];
extern void rt_free(struct rtable * rt);
-extern __inline__ void ip_rt_fast_lock(void)
+static __inline__ void ip_rt_fast_lock(void)
{
atomic_inc(&ip_rt_lock);
}
-extern __inline__ void ip_rt_fast_unlock(void)
+static __inline__ void ip_rt_fast_unlock(void)
{
atomic_dec(&ip_rt_lock);
}
-extern __inline__ void ip_rt_unlock(void)
+static __inline__ void ip_rt_unlock(void)
{
if (atomic_dec_and_test(&ip_rt_lock) && ip_rt_bh_mask)
ip_rt_run_bh();
}
-extern __inline__ unsigned ip_rt_hash_code(__u32 addr)
+static __inline__ unsigned ip_rt_hash_code(__u32 addr)
{
unsigned tmp = addr + (addr>>16);
return (tmp + (tmp>>8)) & 0xFF;
}
-extern __inline__ void ip_rt_put(struct rtable * rt)
+static __inline__ void ip_rt_put(struct rtable * rt)
#ifndef MODULE
{
/* If this rtable entry is not in the cache, we'd better free
@@ -142,9 +142,9 @@ extern __inline__ void ip_rt_put(struct rtable * rt)
#endif
#ifdef CONFIG_KERNELD
-extern struct rtable * ip_rt_route(__u32 daddr, int local, struct device *dev);
+static struct rtable * ip_rt_route(__u32 daddr, int local, struct device *dev);
#else
-extern __inline__ struct rtable * ip_rt_route(__u32 daddr, int local, struct device *dev)
+static __inline__ struct rtable * ip_rt_route(__u32 daddr, int local, struct device *dev)
#ifndef MODULE
{
struct rtable * rth;
@@ -170,7 +170,7 @@ extern __inline__ struct rtable * ip_rt_route(__u32 daddr, int local, struct dev
#endif
#endif
-extern __inline__ struct rtable * ip_check_route(struct rtable ** rp, __u32 daddr,
+static __inline__ struct rtable * ip_check_route(struct rtable ** rp, __u32 daddr,
int local, struct device *dev)
{
struct rtable * rt = *rp;
diff --git a/linux/src/include/net/sock.h b/linux/src/include/net/sock.h
index 7a3ec03f..25a90444 100644
--- a/linux/src/include/net/sock.h
+++ b/linux/src/include/net/sock.h
@@ -559,7 +559,7 @@ extern struct sk_buff *sock_alloc_send_skb(struct sock *skb,
* packet ever received.
*/
-extern __inline__ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static __inline__ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
if (sk->rmem_alloc + skb->truesize >= sk->rcvbuf)
return -ENOMEM;
@@ -571,7 +571,7 @@ extern __inline__ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
return 0;
}
-extern __inline__ int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+static __inline__ int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
if (sk->rmem_alloc + skb->truesize >= sk->rcvbuf)
return -ENOMEM;
@@ -587,7 +587,7 @@ extern __inline__ int __sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
* Recover an error report and clear atomically
*/
-extern __inline__ int sock_error(struct sock *sk)
+static __inline__ int sock_error(struct sock *sk)
{
int err=xchg(&sk->err,0);
return -err;
diff --git a/util/atoi.c b/util/atoi.c
index 64816b9d..e56f50d7 100644
--- a/util/atoi.c
+++ b/util/atoi.c
@@ -91,11 +91,11 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
int
mach_atoi(cp, nump)
-u_char *cp;
-int *nump;
+const u_char *cp;
+int *nump;
{
- int number;
- u_char *original;
+ int number;
+ const u_char *original;
original = cp;
for (number = 0; ('0' <= *cp) && (*cp <= '9'); cp++)
diff --git a/util/atoi.h b/util/atoi.h
index 921b1e81..47adb42e 100644
--- a/util/atoi.h
+++ b/util/atoi.h
@@ -62,6 +62,6 @@ WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#include <sys/types.h>
#define MACH_ATOI_DEFAULT -1
-extern int mach_atoi (u_char *, int *);
+extern int mach_atoi (const u_char *, int *);
#endif /* _UTIL_ATOI_H_ */
diff --git a/version.m4 b/version.m4
index 4e1a2fd9..ef5ee1ef 100644
--- a/version.m4
+++ b/version.m4
@@ -1,4 +1,4 @@
m4_define([AC_PACKAGE_NAME],[GNU Mach])
-m4_define([AC_PACKAGE_VERSION],[1.4])
+m4_define([AC_PACKAGE_VERSION],[1.6])
m4_define([AC_PACKAGE_BUGREPORT],[bug-hurd@gnu.org])
m4_define([AC_PACKAGE_TARNAME],[gnumach])
diff --git a/vm/memory_object.c b/vm/memory_object.c
index e281c6a3..097ed23d 100644
--- a/vm/memory_object.c
+++ b/vm/memory_object.c
@@ -82,24 +82,19 @@ decl_simple_lock_data(,memory_manager_default_lock)
* argument conversion. Explicit deallocation is necessary.
*/
-kern_return_t memory_object_data_supply(object, offset, data_copy, data_cnt,
- lock_value, precious, reply_to, reply_to_type)
- register
- vm_object_t object;
- register
- vm_offset_t offset;
- vm_map_copy_t data_copy;
- unsigned int data_cnt;
- vm_prot_t lock_value;
- boolean_t precious;
- ipc_port_t reply_to;
- mach_msg_type_name_t reply_to_type;
+kern_return_t memory_object_data_supply(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_map_copy_t data_copy,
+ unsigned int data_cnt,
+ vm_prot_t lock_value,
+ boolean_t precious,
+ ipc_port_t reply_to,
+ mach_msg_type_name_t reply_to_type)
{
kern_return_t result = KERN_SUCCESS;
vm_offset_t error_offset = 0;
- register
vm_page_t m;
- register
vm_page_t data_m;
vm_size_t original_length;
vm_offset_t original_offset;
@@ -307,29 +302,26 @@ retry_lookup:
return(result);
}
-
/*
* If successful, destroys the map copy object.
*/
-kern_return_t memory_object_data_provided(object, offset, data, data_cnt,
- lock_value)
- vm_object_t object;
- vm_offset_t offset;
- pointer_t data;
- unsigned int data_cnt;
- vm_prot_t lock_value;
+kern_return_t memory_object_data_provided(
+ vm_object_t object,
+ vm_offset_t offset,
+ pointer_t data,
+ unsigned int data_cnt,
+ vm_prot_t lock_value)
{
return memory_object_data_supply(object, offset, (vm_map_copy_t) data,
data_cnt, lock_value, FALSE, IP_NULL,
0);
}
-
-kern_return_t memory_object_data_error(object, offset, size, error_value)
- vm_object_t object;
- vm_offset_t offset;
- vm_size_t size;
- kern_return_t error_value;
+kern_return_t memory_object_data_error(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_size_t size,
+ kern_return_t error_value)
{
if (object == VM_OBJECT_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -337,16 +329,11 @@ kern_return_t memory_object_data_error(object, offset, size, error_value)
if (size != round_page(size))
return(KERN_INVALID_ARGUMENT);
-#ifdef lint
- /* Error value is ignored at this time */
- error_value++;
-#endif
-
vm_object_lock(object);
offset -= object->paging_offset;
while (size != 0) {
- register vm_page_t m;
+ vm_page_t m;
m = vm_page_lookup(object, offset);
if ((m != VM_PAGE_NULL) && m->busy && m->absent) {
@@ -370,10 +357,10 @@ kern_return_t memory_object_data_error(object, offset, size, error_value)
return(KERN_SUCCESS);
}
-kern_return_t memory_object_data_unavailable(object, offset, size)
- vm_object_t object;
- vm_offset_t offset;
- vm_size_t size;
+kern_return_t memory_object_data_unavailable(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_size_t size)
{
#if MACH_PAGEMAP
vm_external_t existence_info = VM_EXTERNAL_NULL;
@@ -406,7 +393,7 @@ kern_return_t memory_object_data_unavailable(object, offset, size)
offset -= object->paging_offset;
while (size != 0) {
- register vm_page_t m;
+ vm_page_t m;
/*
* We're looking for pages that are both busy and
@@ -453,12 +440,11 @@ kern_return_t memory_object_data_unavailable(object, offset, size)
#define MEMORY_OBJECT_LOCK_RESULT_MUST_CLEAN 2
#define MEMORY_OBJECT_LOCK_RESULT_MUST_RETURN 3
-memory_object_lock_result_t memory_object_lock_page(m, should_return,
- should_flush, prot)
- vm_page_t m;
- memory_object_return_t should_return;
- boolean_t should_flush;
- vm_prot_t prot;
+memory_object_lock_result_t memory_object_lock_page(
+ vm_page_t m,
+ memory_object_return_t should_return,
+ boolean_t should_flush,
+ vm_prot_t prot)
{
/*
* Don't worry about pages for which the kernel
@@ -656,19 +642,17 @@ memory_object_lock_result_t memory_object_lock_page(m, should_return,
*/
kern_return_t
-memory_object_lock_request(object, offset, size,
- should_return, should_flush, prot,
- reply_to, reply_to_type)
- register vm_object_t object;
- register vm_offset_t offset;
- register vm_size_t size;
- memory_object_return_t should_return;
- boolean_t should_flush;
- vm_prot_t prot;
- ipc_port_t reply_to;
- mach_msg_type_name_t reply_to_type;
+memory_object_lock_request(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_size_t size,
+ memory_object_return_t should_return,
+ boolean_t should_flush,
+ vm_prot_t prot,
+ ipc_port_t reply_to,
+ mach_msg_type_name_t reply_to_type)
{
- register vm_page_t m;
+ vm_page_t m;
vm_offset_t original_offset = offset;
vm_size_t original_size = size;
vm_offset_t paging_offset = 0;
@@ -720,8 +704,8 @@ memory_object_lock_request(object, offset, size,
#define PAGEOUT_PAGES \
MACRO_BEGIN \
vm_map_copy_t copy; \
- register int i; \
- register vm_page_t hp; \
+ int i; \
+ vm_page_t hp; \
\
vm_object_unlock(object); \
\
@@ -892,13 +876,12 @@ MACRO_END
}
kern_return_t
-memory_object_set_attributes_common(object, object_ready, may_cache,
- copy_strategy, use_old_pageout)
- vm_object_t object;
- boolean_t object_ready;
- boolean_t may_cache;
- memory_object_copy_strategy_t copy_strategy;
- boolean_t use_old_pageout;
+memory_object_set_attributes_common(
+ vm_object_t object,
+ boolean_t object_ready,
+ boolean_t may_cache,
+ memory_object_copy_strategy_t copy_strategy,
+ boolean_t use_old_pageout)
{
if (object == VM_OBJECT_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -959,13 +942,12 @@ memory_object_set_attributes_common(object, object_ready, may_cache,
* XXX stub that made change_attributes an RPC. Need investigation.
*/
-kern_return_t memory_object_change_attributes(object, may_cache,
- copy_strategy, reply_to, reply_to_type)
- vm_object_t object;
- boolean_t may_cache;
- memory_object_copy_strategy_t copy_strategy;
- ipc_port_t reply_to;
- mach_msg_type_name_t reply_to_type;
+kern_return_t memory_object_change_attributes(
+ vm_object_t object,
+ boolean_t may_cache,
+ memory_object_copy_strategy_t copy_strategy,
+ ipc_port_t reply_to,
+ mach_msg_type_name_t reply_to_type)
{
kern_return_t result;
@@ -995,33 +977,32 @@ kern_return_t memory_object_change_attributes(object, may_cache,
}
kern_return_t
-memory_object_set_attributes(object, object_ready, may_cache, copy_strategy)
- vm_object_t object;
- boolean_t object_ready;
- boolean_t may_cache;
- memory_object_copy_strategy_t copy_strategy;
+memory_object_set_attributes(
+ vm_object_t object,
+ boolean_t object_ready,
+ boolean_t may_cache,
+ memory_object_copy_strategy_t copy_strategy)
{
return memory_object_set_attributes_common(object, object_ready,
may_cache, copy_strategy,
TRUE);
}
-kern_return_t memory_object_ready(object, may_cache, copy_strategy)
- vm_object_t object;
- boolean_t may_cache;
- memory_object_copy_strategy_t copy_strategy;
+kern_return_t memory_object_ready(
+ vm_object_t object,
+ boolean_t may_cache,
+ memory_object_copy_strategy_t copy_strategy)
{
return memory_object_set_attributes_common(object, TRUE,
may_cache, copy_strategy,
FALSE);
}
-kern_return_t memory_object_get_attributes(object, object_ready,
- may_cache, copy_strategy)
- vm_object_t object;
- boolean_t *object_ready;
- boolean_t *may_cache;
- memory_object_copy_strategy_t *copy_strategy;
+kern_return_t memory_object_get_attributes(
+ vm_object_t object,
+ boolean_t *object_ready,
+ boolean_t *may_cache,
+ memory_object_copy_strategy_t *copy_strategy)
{
if (object == VM_OBJECT_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -1041,7 +1022,7 @@ kern_return_t memory_object_get_attributes(object, object_ready,
* If successful, consumes the supplied naked send right.
*/
kern_return_t vm_set_default_memory_manager(host, default_manager)
- host_t host;
+ const host_t host;
ipc_port_t *default_manager;
{
ipc_port_t current_manager;
@@ -1123,7 +1104,7 @@ ipc_port_t memory_manager_default_reference(void)
*/
boolean_t memory_manager_default_port(port)
- ipc_port_t port;
+ const ipc_port_t port;
{
ipc_port_t current;
boolean_t result;
diff --git a/vm/memory_object_proxy.c b/vm/memory_object_proxy.c
index 4fed312e..01bce2a5 100644
--- a/vm/memory_object_proxy.c
+++ b/vm/memory_object_proxy.c
@@ -64,7 +64,7 @@ void
memory_object_proxy_init (void)
{
kmem_cache_init (&memory_object_proxy_cache, "memory_object_proxy",
- sizeof (struct memory_object_proxy), 0, NULL, NULL, NULL, 0);
+ sizeof (struct memory_object_proxy), 0, NULL, 0);
}
/* Lookup a proxy memory object by its port. */
@@ -115,11 +115,11 @@ memory_object_proxy_notify (mach_msg_header_t *msg)
given OBJECT at OFFSET in the new object with the maximum
protection MAX_PROTECTION and return it in *PORT. */
kern_return_t
-memory_object_create_proxy (ipc_space_t space, vm_prot_t max_protection,
+memory_object_create_proxy (const ipc_space_t space, vm_prot_t max_protection,
ipc_port_t *object, natural_t object_count,
- vm_offset_t *offset, natural_t offset_count,
- vm_offset_t *start, natural_t start_count,
- vm_offset_t *len, natural_t len_count,
+ const vm_offset_t *offset, natural_t offset_count,
+ const vm_offset_t *start, natural_t start_count,
+ const vm_offset_t *len, natural_t len_count,
ipc_port_t *port)
{
memory_object_proxy_t proxy;
diff --git a/vm/memory_object_proxy.h b/vm/memory_object_proxy.h
index f4be0d0d..dc0ea747 100644
--- a/vm/memory_object_proxy.h
+++ b/vm/memory_object_proxy.h
@@ -19,7 +19,7 @@
Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111, USA. */
#ifndef _VM_MEMORY_OBJECT_PROXY_H_
-#define _VM_MEMORY_OBJECT_PROXT_H_
+#define _VM_MEMORY_OBJECT_PROXY_H_
#include <ipc/ipc_types.h>
#include <mach/boolean.h>
@@ -30,19 +30,8 @@
extern void memory_object_proxy_init (void);
extern boolean_t memory_object_proxy_notify (mach_msg_header_t *msg);
-extern kern_return_t memory_object_create_proxy (ipc_space_t space,
- vm_prot_t max_protection,
- ipc_port_t *object,
- natural_t object_count,
- vm_offset_t *offset,
- natural_t offset_count,
- vm_offset_t *start,
- natural_t start_count,
- vm_offset_t *len,
- natural_t len_count,
- ipc_port_t *port);
extern kern_return_t memory_object_proxy_lookup (ipc_port_t port,
ipc_port_t *object,
vm_prot_t *max_protection);
-#endif /* _VM_MEMORY_OBJECT_PROXT_H_ */
+#endif /* _VM_MEMORY_OBJECT_PROXY_H_ */
diff --git a/vm/pmap.h b/vm/pmap.h
index 59fd03ab..9bbcdc32 100644
--- a/vm/pmap.h
+++ b/vm/pmap.h
@@ -67,9 +67,6 @@
extern vm_offset_t pmap_steal_memory(vm_size_t);
/* During VM initialization, report remaining unused physical pages. */
extern unsigned int pmap_free_pages(void);
-/* During VM initialization, use remaining physical pages to allocate page
- * frames. */
-extern void pmap_startup(vm_offset_t *, vm_offset_t *);
/* Initialization, after kernel runs in virtual memory. */
extern void pmap_init(void);
@@ -80,18 +77,14 @@ extern void pmap_init(void);
* Otherwise, it must implement
* pmap_free_pages
* pmap_virtual_space
- * pmap_next_page
* pmap_init
- * and vm/vm_resident.c implements pmap_steal_memory and pmap_startup
- * using pmap_free_pages, pmap_next_page, pmap_virtual_space,
- * and pmap_enter. pmap_free_pages may over-estimate the number
- * of unused physical pages, and pmap_next_page may return FALSE
- * to indicate that there are no more unused pages to return.
+ * and vm/vm_resident.c implements pmap_steal_memory using
+ * pmap_free_pages, pmap_virtual_space, and pmap_enter.
+ *
+ * pmap_free_pages may over-estimate the number of unused physical pages.
* However, for best performance pmap_free_pages should be accurate.
*/
-/* During VM initialization, return the next unused physical page. */
-extern boolean_t pmap_next_page(vm_offset_t *);
/* During VM initialization, report virtual space available for the kernel. */
extern void pmap_virtual_space(vm_offset_t *, vm_offset_t *);
#endif /* MACHINE_PAGES */
@@ -163,38 +156,16 @@ void pmap_clear_modify(vm_offset_t pa);
/* Return modify bit */
boolean_t pmap_is_modified(vm_offset_t pa);
-
-/*
- * Statistics routines
- */
-
-#ifndef pmap_resident_count
-extern int pmap_resident_count();
-#endif /* pmap_resident_count */
-
/*
* Sundry required routines
*/
/* Return a virtual-to-physical mapping, if possible. */
extern vm_offset_t pmap_extract(pmap_t, vm_offset_t);
-/* Is virtual address valid? */
-extern boolean_t pmap_access();
/* Perform garbage collection, if any. */
extern void pmap_collect(pmap_t);
/* Specify pageability. */
extern void pmap_change_wiring(pmap_t, vm_offset_t, boolean_t);
-#ifndef pmap_phys_address
-/* Transform address returned by device driver mapping function to physical
- * address known to this module. */
-extern vm_offset_t pmap_phys_address();
-#endif /* pmap_phys_address */
-#ifndef pmap_phys_to_frame
-/* Inverse of pmap_phys_address, for use by device driver mapping function in
- * machine-independent pseudo-devices. */
-extern int pmap_phys_to_frame();
-#endif /* pmap_phys_to_frame */
-
/*
* Optional routines
*/
@@ -205,7 +176,7 @@ extern void pmap_copy(pmap_t, pmap_t, vm_offset_t, vm_size_t,
#endif /* pmap_copy */
#ifndef pmap_attribute
/* Get/Set special memory attributes. */
-extern kern_return_t pmap_attribute();
+extern kern_return_t pmap_attribute(void);
#endif /* pmap_attribute */
/*
diff --git a/vm/vm_debug.c b/vm/vm_debug.c
index 0af58b69..227090e6 100644
--- a/vm/vm_debug.c
+++ b/vm/vm_debug.c
@@ -65,8 +65,7 @@
*/
ipc_port_t
-vm_object_real_name(object)
- vm_object_t object;
+vm_object_real_name(vm_object_t object)
{
ipc_port_t port = IP_NULL;
@@ -94,11 +93,11 @@ vm_object_real_name(object)
*/
kern_return_t
-mach_vm_region_info(map, address, regionp, portp)
- vm_map_t map;
- vm_offset_t address;
- vm_region_info_t *regionp;
- ipc_port_t *portp;
+mach_vm_region_info(
+ vm_map_t map,
+ vm_offset_t address,
+ vm_region_info_t *regionp,
+ ipc_port_t *portp)
{
vm_map_t cmap; /* current map in traversal */
vm_map_t nmap; /* next map to look at */
@@ -184,11 +183,11 @@ mach_vm_region_info(map, address, regionp, portp)
*/
kern_return_t
-mach_vm_object_info(object, infop, shadowp, copyp)
- vm_object_t object;
- vm_object_info_t *infop;
- ipc_port_t *shadowp;
- ipc_port_t *copyp;
+mach_vm_object_info(
+ vm_object_t object,
+ vm_object_info_t *infop,
+ ipc_port_t *shadowp,
+ ipc_port_t *copyp)
{
vm_object_info_t info;
vm_object_info_state_t state;
@@ -278,10 +277,10 @@ mach_vm_object_info(object, infop, shadowp, copyp)
*/
kern_return_t
-mach_vm_object_pages(object, pagesp, countp)
- vm_object_t object;
- vm_page_info_array_t *pagesp;
- natural_t *countp;
+mach_vm_object_pages(
+ vm_object_t object,
+ vm_page_info_array_t *pagesp,
+ natural_t *countp)
{
vm_size_t size;
vm_offset_t addr;
@@ -404,7 +403,7 @@ mach_vm_object_pages(object, pagesp, countp)
addr + rsize_used, size - rsize_used);
if (size_used != rsize_used)
- memset((char *) (addr + size_used), 0,
+ memset((void *) (addr + size_used), 0,
rsize_used - size_used);
kr = vm_map_copyin(ipc_kernel_map, addr, rsize_used,
@@ -434,7 +433,7 @@ mach_vm_object_pages(object, pagesp, countp)
kern_return_t
host_virtual_physical_table_info(host, infop, countp)
- host_t host;
+ const host_t host;
hash_info_bucket_array_t *infop;
natural_t *countp;
{
diff --git a/vm/vm_external.c b/vm/vm_external.c
index e9643ffc..3b1a2879 100644
--- a/vm/vm_external.c
+++ b/vm/vm_external.c
@@ -35,6 +35,7 @@
#include <vm/vm_external.h>
#include <mach/vm_param.h>
#include <kern/assert.h>
+#include <string.h>
@@ -56,8 +57,7 @@ struct kmem_cache vm_object_small_existence_map_cache;
struct kmem_cache vm_object_large_existence_map_cache;
-vm_external_t vm_external_create(size)
- vm_offset_t size;
+vm_external_t vm_external_create(vm_offset_t size)
{
vm_external_t result;
vm_size_t bytes;
@@ -70,16 +70,16 @@ vm_external_t vm_external_create(size)
result->existence_map =
(char *) kmem_cache_alloc(&vm_object_small_existence_map_cache);
result->existence_size = SMALL_SIZE;
- } else if (bytes <= LARGE_SIZE) {
+ } else {
result->existence_map =
(char *) kmem_cache_alloc(&vm_object_large_existence_map_cache);
result->existence_size = LARGE_SIZE;
}
+ memset (result->existence_map, 0, result->existence_size);
return(result);
}
-void vm_external_destroy(e)
- vm_external_t e;
+void vm_external_destroy(vm_external_t e)
{
if (e == VM_EXTERNAL_NULL)
return;
@@ -97,8 +97,8 @@ void vm_external_destroy(e)
}
vm_external_state_t _vm_external_state_get(e, offset)
- vm_external_t e;
- vm_offset_t offset;
+ const vm_external_t e;
+ vm_offset_t offset;
{
unsigned
int bit, byte;
@@ -115,10 +115,10 @@ vm_external_state_t _vm_external_state_get(e, offset)
VM_EXTERNAL_STATE_EXISTS : VM_EXTERNAL_STATE_ABSENT );
}
-void vm_external_state_set(e, offset, state)
- vm_external_t e;
- vm_offset_t offset;
- vm_external_state_t state;
+void vm_external_state_set(
+ vm_external_t e,
+ vm_offset_t offset,
+ vm_external_state_t state)
{
unsigned
int bit, byte;
@@ -140,13 +140,13 @@ void vm_external_module_initialize(void)
vm_size_t size = (vm_size_t) sizeof(struct vm_external);
kmem_cache_init(&vm_external_cache, "vm_external", size, 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
kmem_cache_init(&vm_object_small_existence_map_cache,
"small_existence_map", SMALL_SIZE, 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
kmem_cache_init(&vm_object_large_existence_map_cache,
"large_existence_map", LARGE_SIZE, 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
}
diff --git a/vm/vm_external.h b/vm/vm_external.h
index 55c9e48d..4e44ddf7 100644
--- a/vm/vm_external.h
+++ b/vm/vm_external.h
@@ -46,9 +46,14 @@ typedef struct vm_external {
* been written to backing
* storage.
*/
+#if 0
+ /* XXX: Currently, existence_count is not used. I guess it
+ could be useful to get rid of the map if the count drops to
+ zero. */
int existence_count;/* Number of bits turned on in
* existence_map.
*/
+#endif
} *vm_external_t;
#define VM_EXTERNAL_NULL ((vm_external_t) 0)
diff --git a/vm/vm_fault.c b/vm/vm_fault.c
index 7e849616..09e2c54d 100644
--- a/vm/vm_fault.c
+++ b/vm/vm_fault.c
@@ -51,7 +51,7 @@
#include <mach/memory_object.h>
#include <vm/memory_object_user.user.h>
/* For memory_object_data_{request,unlock} */
-#include <kern/macro_help.h>
+#include <kern/macros.h>
#include <kern/slab.h>
#if MACH_PCSAMPLE
@@ -88,8 +88,6 @@ struct kmem_cache vm_fault_state_cache;
int vm_object_absent_max = 50;
-int vm_fault_debug = 0;
-
boolean_t vm_fault_dirty_handling = FALSE;
boolean_t vm_fault_interruptible = TRUE;
@@ -107,7 +105,7 @@ extern struct db_watchpoint *db_watchpoint_list;
void vm_fault_init(void)
{
kmem_cache_init(&vm_fault_state_cache, "vm_fault_state",
- sizeof(vm_fault_state_t), 0, NULL, NULL, NULL, 0);
+ sizeof(vm_fault_state_t), 0, NULL, 0);
}
/*
@@ -125,9 +123,9 @@ void vm_fault_init(void)
* "object" must be locked.
*/
void
-vm_fault_cleanup(object, top_page)
- register vm_object_t object;
- register vm_page_t top_page;
+vm_fault_cleanup(
+ vm_object_t object,
+ vm_page_t top_page)
{
vm_object_paging_end(object);
vm_object_unlock(object);
@@ -204,33 +202,26 @@ vm_fault_cleanup(object, top_page)
* The "result_page" is also left busy. It is not removed
* from the pageout queues.
*/
-vm_fault_return_t vm_fault_page(first_object, first_offset,
- fault_type, must_be_resident, interruptible,
- protection,
- result_page, top_page,
- resume, continuation)
+vm_fault_return_t vm_fault_page(
/* Arguments: */
- vm_object_t first_object; /* Object to begin search */
- vm_offset_t first_offset; /* Offset into object */
- vm_prot_t fault_type; /* What access is requested */
- boolean_t must_be_resident;/* Must page be resident? */
- boolean_t interruptible; /* May fault be interrupted? */
+ vm_object_t first_object, /* Object to begin search */
+ vm_offset_t first_offset, /* Offset into object */
+ vm_prot_t fault_type, /* What access is requested */
+ boolean_t must_be_resident,/* Must page be resident? */
+ boolean_t interruptible, /* May fault be interrupted? */
/* Modifies in place: */
- vm_prot_t *protection; /* Protection for mapping */
+ vm_prot_t *protection, /* Protection for mapping */
/* Returns: */
- vm_page_t *result_page; /* Page found, if successful */
- vm_page_t *top_page; /* Page in top object, if
+ vm_page_t *result_page, /* Page found, if successful */
+ vm_page_t *top_page, /* Page in top object, if
* not result_page.
*/
/* More arguments: */
- boolean_t resume; /* We are restarting. */
- void (*continuation)(); /* Continuation for blocking. */
+ boolean_t resume, /* We are restarting. */
+ void (*continuation)()) /* Continuation for blocking. */
{
- register
vm_page_t m;
- register
vm_object_t object;
- register
vm_offset_t offset;
vm_page_t first_m;
vm_object_t next_object;
@@ -239,7 +230,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
vm_prot_t access_required;
if (resume) {
- register vm_fault_state_t *state =
+ vm_fault_state_t *state =
(vm_fault_state_t *) current_thread()->ith_other;
if (state->vmfp_backoff)
@@ -357,7 +348,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
PAGE_ASSERT_WAIT(m, interruptible);
vm_object_unlock(object);
if (continuation != (void (*)()) 0) {
- register vm_fault_state_t *state =
+ vm_fault_state_t *state =
(vm_fault_state_t *) current_thread()->ith_other;
/*
@@ -616,7 +607,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
* won't block for pages.
*/
- if (m->fictitious && !vm_page_convert(m, FALSE)) {
+ if (m->fictitious && !vm_page_convert(&m, FALSE)) {
VM_PAGE_FREE(m);
vm_fault_cleanup(object, first_m);
return(VM_FAULT_MEMORY_SHORTAGE);
@@ -734,7 +725,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
assert(m->object == object);
first_m = VM_PAGE_NULL;
- if (m->fictitious && !vm_page_convert(m, !object->internal)) {
+ if (m->fictitious && !vm_page_convert(&m, !object->internal)) {
VM_PAGE_FREE(m);
vm_fault_cleanup(object, VM_PAGE_NULL);
return(VM_FAULT_MEMORY_SHORTAGE);
@@ -777,12 +768,10 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
* objects.
*/
-#if EXTRA_ASSERTIONS
assert(m->busy && !m->absent);
assert((first_m == VM_PAGE_NULL) ||
(first_m->busy && !first_m->absent &&
!first_m->active && !first_m->inactive));
-#endif /* EXTRA_ASSERTIONS */
/*
* If the page is being written, but isn't
@@ -1094,7 +1083,7 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
vm_fault_cleanup(object, first_m);
if (continuation != (void (*)()) 0) {
- register vm_fault_state_t *state =
+ vm_fault_state_t *state =
(vm_fault_state_t *) current_thread()->ith_other;
/*
@@ -1141,9 +1130,9 @@ vm_fault_return_t vm_fault_page(first_object, first_offset,
*/
void
-vm_fault_continue()
+vm_fault_continue(void)
{
- register vm_fault_state_t *state =
+ vm_fault_state_t *state =
(vm_fault_state_t *) current_thread()->ith_other;
(void) vm_fault(state->vmf_map,
@@ -1154,14 +1143,13 @@ vm_fault_continue()
/*NOTREACHED*/
}
-kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
- resume, continuation)
- vm_map_t map;
- vm_offset_t vaddr;
- vm_prot_t fault_type;
- boolean_t change_wiring;
- boolean_t resume;
- void (*continuation)();
+kern_return_t vm_fault(
+ vm_map_t map,
+ vm_offset_t vaddr,
+ vm_prot_t fault_type,
+ boolean_t change_wiring,
+ boolean_t resume,
+ void (*continuation)())
{
vm_map_version_t version; /* Map version for verificiation */
boolean_t wired; /* Should mapping be wired down? */
@@ -1173,11 +1161,10 @@ kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
vm_page_t top_page; /* Placeholder page */
kern_return_t kr;
- register
vm_page_t m; /* Fast access to result_page */
if (resume) {
- register vm_fault_state_t *state =
+ vm_fault_state_t *state =
(vm_fault_state_t *) current_thread()->ith_other;
/*
@@ -1253,7 +1240,7 @@ kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
vm_object_paging_begin(object);
if (continuation != (void (*)()) 0) {
- register vm_fault_state_t *state =
+ vm_fault_state_t *state =
(vm_fault_state_t *) current_thread()->ith_other;
/*
@@ -1307,7 +1294,7 @@ kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
goto done;
case VM_FAULT_MEMORY_SHORTAGE:
if (continuation != (void (*)()) 0) {
- register vm_fault_state_t *state =
+ vm_fault_state_t *state =
(vm_fault_state_t *) current_thread()->ith_other;
/*
@@ -1490,7 +1477,7 @@ kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
done:
if (continuation != (void (*)()) 0) {
- register vm_fault_state_t *state =
+ vm_fault_state_t *state =
(vm_fault_state_t *) current_thread()->ith_other;
kmem_cache_free(&vm_fault_state_cache, (vm_offset_t) state);
@@ -1501,21 +1488,19 @@ kern_return_t vm_fault(map, vaddr, fault_type, change_wiring,
return(kr);
}
-kern_return_t vm_fault_wire_fast();
-
/*
* vm_fault_wire:
*
* Wire down a range of virtual addresses in a map.
*/
-void vm_fault_wire(map, entry)
- vm_map_t map;
- vm_map_entry_t entry;
+void vm_fault_wire(
+ vm_map_t map,
+ vm_map_entry_t entry)
{
- register vm_offset_t va;
- register pmap_t pmap;
- register vm_offset_t end_addr = entry->vme_end;
+ vm_offset_t va;
+ pmap_t pmap;
+ vm_offset_t end_addr = entry->vme_end;
pmap = vm_map_pmap(map);
@@ -1544,14 +1529,14 @@ void vm_fault_wire(map, entry)
*
* Unwire a range of virtual addresses in a map.
*/
-void vm_fault_unwire(map, entry)
- vm_map_t map;
- vm_map_entry_t entry;
+void vm_fault_unwire(
+ vm_map_t map,
+ vm_map_entry_t entry)
{
- register vm_offset_t va;
- register pmap_t pmap;
- register vm_offset_t end_addr = entry->vme_end;
- vm_object_t object;
+ vm_offset_t va;
+ pmap_t pmap;
+ vm_offset_t end_addr = entry->vme_end;
+ vm_object_t object;
pmap = vm_map_pmap(map);
@@ -1633,14 +1618,14 @@ void vm_fault_unwire(map, entry)
* other than the common case will return KERN_FAILURE, and the caller
* is expected to call vm_fault().
*/
-kern_return_t vm_fault_wire_fast(map, va, entry)
- vm_map_t map;
- vm_offset_t va;
- vm_map_entry_t entry;
+kern_return_t vm_fault_wire_fast(
+ vm_map_t map,
+ vm_offset_t va,
+ vm_map_entry_t entry)
{
vm_object_t object;
vm_offset_t offset;
- register vm_page_t m;
+ vm_page_t m;
vm_prot_t prot;
vm_stat.faults++; /* needs lock XXX */
@@ -1782,9 +1767,9 @@ kern_return_t vm_fault_wire_fast(map, va, entry)
* Release a page used by vm_fault_copy.
*/
-void vm_fault_copy_cleanup(page, top_page)
- vm_page_t page;
- vm_page_t top_page;
+void vm_fault_copy_cleanup(
+ vm_page_t page,
+ vm_page_t top_page)
{
vm_object_t object = page->object;
@@ -1825,23 +1810,14 @@ void vm_fault_copy_cleanup(page, top_page)
* requested.
*/
kern_return_t vm_fault_copy(
- src_object,
- src_offset,
- src_size,
- dst_object,
- dst_offset,
- dst_map,
- dst_version,
- interruptible
- )
- vm_object_t src_object;
- vm_offset_t src_offset;
- vm_size_t *src_size; /* INOUT */
- vm_object_t dst_object;
- vm_offset_t dst_offset;
- vm_map_t dst_map;
- vm_map_version_t *dst_version;
- boolean_t interruptible;
+ vm_object_t src_object,
+ vm_offset_t src_offset,
+ vm_size_t *src_size, /* INOUT */
+ vm_object_t dst_object,
+ vm_offset_t dst_offset,
+ vm_map_t dst_map,
+ vm_map_version_t *dst_version,
+ boolean_t interruptible)
{
vm_page_t result_page;
vm_prot_t prot;
@@ -2022,13 +1998,11 @@ kern_return_t vm_fault_copy(
* XXX Untested. Also unused. Eventually, this technology
* could be used in vm_fault_copy() to advantage.
*/
-vm_fault_return_t vm_fault_page_overwrite(dst_object, dst_offset, result_page)
- register
- vm_object_t dst_object;
- vm_offset_t dst_offset;
- vm_page_t *result_page; /* OUT */
+vm_fault_return_t vm_fault_page_overwrite(
+ vm_object_t dst_object,
+ vm_offset_t dst_offset,
+ vm_page_t *result_page) /* OUT */
{
- register
vm_page_t dst_page;
#define interruptible FALSE /* XXX */
diff --git a/vm/vm_fault.h b/vm/vm_fault.h
index 0492ccf4..7fdbc417 100644
--- a/vm/vm_fault.h
+++ b/vm/vm_fault.h
@@ -69,4 +69,10 @@ extern void vm_fault_unwire(vm_map_t, vm_map_entry_t);
extern kern_return_t vm_fault_copy(vm_object_t, vm_offset_t, vm_size_t *,
vm_object_t, vm_offset_t, vm_map_t,
vm_map_version_t *, boolean_t);
+
+kern_return_t vm_fault_wire_fast(
+ vm_map_t map,
+ vm_offset_t va,
+ vm_map_entry_t entry);
+
#endif /* _VM_VM_FAULT_H_ */
diff --git a/vm/vm_init.c b/vm/vm_init.c
index 89eb0984..23d5d46e 100644
--- a/vm/vm_init.c
+++ b/vm/vm_init.c
@@ -51,7 +51,7 @@
* This is done only by the first cpu up.
*/
-void vm_mem_bootstrap()
+void vm_mem_bootstrap(void)
{
vm_offset_t start, end;
@@ -79,8 +79,9 @@ void vm_mem_bootstrap()
memory_manager_default_init();
}
-void vm_mem_init()
+void vm_mem_init(void)
{
vm_object_init();
memory_object_proxy_init();
+ vm_page_info_all();
}
diff --git a/vm/vm_init.h b/vm/vm_init.h
new file mode 100644
index 00000000..42ef48b2
--- /dev/null
+++ b/vm/vm_init.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _VM_VM_INIT_H_
+#define _VM_VM_INIT_H_
+
+extern void vm_mem_init(void);
+extern void vm_mem_bootstrap(void);
+
+#endif /* _VM_VM_INIT_H_ */
diff --git a/vm/vm_kern.c b/vm/vm_kern.c
index fd46e982..9c0a20b7 100644
--- a/vm/vm_kern.c
+++ b/vm/vm_kern.c
@@ -42,6 +42,7 @@
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/lock.h>
+#include <kern/slab.h>
#include <kern/thread.h>
#include <kern/printf.h>
#include <vm/pmap.h>
@@ -62,9 +63,6 @@ static struct vm_map kernel_map_store;
vm_map_t kernel_map = &kernel_map_store;
vm_map_t kernel_pageable_map;
-extern void kmem_alloc_pages();
-extern void kmem_remap_pages();
-
/*
* projected_buffer_allocate
*
@@ -82,15 +80,14 @@ extern void kmem_remap_pages();
*/
kern_return_t
-projected_buffer_allocate(map, size, persistence, kernel_p,
- user_p, protection, inheritance)
- vm_map_t map;
- vm_size_t size;
- int persistence;
- vm_offset_t *kernel_p;
- vm_offset_t *user_p;
- vm_prot_t protection;
- vm_inherit_t inheritance; /*Currently only VM_INHERIT_NONE supported*/
+projected_buffer_allocate(
+ vm_map_t map,
+ vm_size_t size,
+ int persistence,
+ vm_offset_t *kernel_p,
+ vm_offset_t *user_p,
+ vm_prot_t protection,
+ vm_inherit_t inheritance) /*Currently only VM_INHERIT_NONE supported*/
{
vm_object_t object;
vm_map_entry_t u_entry, k_entry;
@@ -180,13 +177,13 @@ projected_buffer_allocate(map, size, persistence, kernel_p,
*/
kern_return_t
-projected_buffer_map(map, kernel_addr, size, user_p, protection, inheritance)
- vm_map_t map;
- vm_offset_t kernel_addr;
- vm_size_t size;
- vm_offset_t *user_p;
- vm_prot_t protection;
- vm_inherit_t inheritance; /*Currently only VM_INHERIT_NONE supported*/
+projected_buffer_map(
+ vm_map_t map,
+ vm_offset_t kernel_addr,
+ vm_size_t size,
+ vm_offset_t *user_p,
+ vm_prot_t protection,
+ vm_inherit_t inheritance) /*Currently only VM_INHERIT_NONE supported*/
{
vm_map_entry_t u_entry, k_entry;
vm_offset_t physical_addr, user_addr;
@@ -253,15 +250,18 @@ projected_buffer_map(map, kernel_addr, size, user_p, protection, inheritance)
*/
kern_return_t
-projected_buffer_deallocate(map, start, end)
- vm_map_t map;
- vm_offset_t start, end;
+projected_buffer_deallocate(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end)
{
vm_map_entry_t entry, k_entry;
+ if (map == VM_MAP_NULL || map == kernel_map)
+ return KERN_INVALID_ARGUMENT;
+
vm_map_lock(map);
- if (map == VM_MAP_NULL || map == kernel_map ||
- !vm_map_lookup_entry(map, start, &entry) ||
+ if (!vm_map_lookup_entry(map, start, &entry) ||
end > entry->vme_end ||
/*Check corresponding kernel entry*/
(k_entry = entry->projected_on) == 0) {
@@ -303,8 +303,7 @@ projected_buffer_deallocate(map, start, end)
*/
kern_return_t
-projected_buffer_collect(map)
- vm_map_t map;
+projected_buffer_collect(vm_map_t map)
{
vm_map_entry_t entry, next;
@@ -330,9 +329,10 @@ projected_buffer_collect(map)
*/
boolean_t
-projected_buffer_in_range(map, start, end)
- vm_map_t map;
- vm_offset_t start, end;
+projected_buffer_in_range(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end)
{
vm_map_entry_t entry;
@@ -359,14 +359,15 @@ projected_buffer_in_range(map, start, end)
*/
kern_return_t
-kmem_alloc(map, addrp, size)
- vm_map_t map;
- vm_offset_t *addrp;
- vm_size_t size;
+kmem_alloc(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size)
{
vm_object_t object;
vm_map_entry_t entry;
vm_offset_t addr;
+ unsigned int attempts;
kern_return_t kr;
/*
@@ -385,12 +386,22 @@ kmem_alloc(map, addrp, size)
size = round_page(size);
object = vm_object_allocate(size);
+ attempts = 0;
+
+retry:
vm_map_lock(map);
kr = vm_map_find_entry(map, &addr, size, (vm_offset_t) 0,
VM_OBJECT_NULL, &entry);
if (kr != KERN_SUCCESS) {
- printf_once("no more room for kmem_alloc in %p\n", map);
vm_map_unlock(map);
+
+ if (attempts == 0) {
+ attempts++;
+ slab_collect();
+ goto retry;
+ }
+
+ printf_once("no more room for kmem_alloc in %p\n", map);
vm_object_deallocate(object);
return kr;
}
@@ -420,113 +431,25 @@ kmem_alloc(map, addrp, size)
}
/*
- * kmem_realloc:
- *
- * Reallocate wired-down memory in the kernel's address map
- * or a submap. Newly allocated pages are not zeroed.
- * This can only be used on regions allocated with kmem_alloc.
- *
- * If successful, the pages in the old region are mapped twice.
- * The old region is unchanged. Use kmem_free to get rid of it.
- */
-kern_return_t kmem_realloc(map, oldaddr, oldsize, newaddrp, newsize)
- vm_map_t map;
- vm_offset_t oldaddr;
- vm_size_t oldsize;
- vm_offset_t *newaddrp;
- vm_size_t newsize;
-{
- vm_offset_t oldmin, oldmax;
- vm_offset_t newaddr;
- vm_object_t object;
- vm_map_entry_t oldentry, newentry;
- kern_return_t kr;
-
- oldmin = trunc_page(oldaddr);
- oldmax = round_page(oldaddr + oldsize);
- oldsize = oldmax - oldmin;
- newsize = round_page(newsize);
-
- /*
- * Find space for the new region.
- */
-
- vm_map_lock(map);
- kr = vm_map_find_entry(map, &newaddr, newsize, (vm_offset_t) 0,
- VM_OBJECT_NULL, &newentry);
- if (kr != KERN_SUCCESS) {
- vm_map_unlock(map);
- printf_once("no more room for kmem_realloc in %p\n", map);
- return kr;
- }
-
- /*
- * Find the VM object backing the old region.
- */
-
- if (!vm_map_lookup_entry(map, oldmin, &oldentry))
- panic("kmem_realloc");
- object = oldentry->object.vm_object;
-
- /*
- * Increase the size of the object and
- * fill in the new region.
- */
-
- vm_object_reference(object);
- vm_object_lock(object);
- if (object->size != oldsize)
- panic("kmem_realloc");
- object->size = newsize;
- vm_object_unlock(object);
-
- newentry->object.vm_object = object;
- newentry->offset = 0;
-
- /*
- * Since we have not given out this address yet,
- * it is safe to unlock the map. We are trusting
- * that nobody will play with either region.
- */
-
- vm_map_unlock(map);
-
- /*
- * Remap the pages in the old region and
- * allocate more pages for the new region.
- */
-
- kmem_remap_pages(object, 0,
- newaddr, newaddr + oldsize,
- VM_PROT_DEFAULT);
- kmem_alloc_pages(object, oldsize,
- newaddr + oldsize, newaddr + newsize,
- VM_PROT_DEFAULT);
-
- *newaddrp = newaddr;
- return KERN_SUCCESS;
-}
-
-/*
* kmem_alloc_wired:
*
* Allocate wired-down memory in the kernel's address map
* or a submap. The memory is not zero-filled.
*
* The memory is allocated in the kernel_object.
- * It may not be copied with vm_map_copy, and
- * it may not be reallocated with kmem_realloc.
+ * It may not be copied with vm_map_copy.
*/
kern_return_t
-kmem_alloc_wired(map, addrp, size)
- vm_map_t map;
- vm_offset_t *addrp;
- vm_size_t size;
+kmem_alloc_wired(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size)
{
vm_map_entry_t entry;
vm_offset_t offset;
vm_offset_t addr;
+ unsigned int attempts;
kern_return_t kr;
/*
@@ -537,12 +460,22 @@ kmem_alloc_wired(map, addrp, size)
*/
size = round_page(size);
+ attempts = 0;
+
+retry:
vm_map_lock(map);
kr = vm_map_find_entry(map, &addr, size, (vm_offset_t) 0,
kernel_object, &entry);
if (kr != KERN_SUCCESS) {
- printf_once("no more room for kmem_alloc_wired in %p\n", map);
vm_map_unlock(map);
+
+ if (attempts == 0) {
+ attempts++;
+ slab_collect();
+ goto retry;
+ }
+
+ printf_once("no more room for kmem_alloc_wired in %p\n", map);
return kr;
}
@@ -591,14 +524,15 @@ kmem_alloc_wired(map, addrp, size)
*/
kern_return_t
-kmem_alloc_aligned(map, addrp, size)
- vm_map_t map;
- vm_offset_t *addrp;
- vm_size_t size;
+kmem_alloc_aligned(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size)
{
vm_map_entry_t entry;
vm_offset_t offset;
vm_offset_t addr;
+ unsigned int attempts;
kern_return_t kr;
if ((size & (size - 1)) != 0)
@@ -612,12 +546,22 @@ kmem_alloc_aligned(map, addrp, size)
*/
size = round_page(size);
+ attempts = 0;
+
+retry:
vm_map_lock(map);
kr = vm_map_find_entry(map, &addr, size, size - 1,
kernel_object, &entry);
if (kr != KERN_SUCCESS) {
- printf_once("no more rooom for kmem_alloc_aligned in %p\n", map);
vm_map_unlock(map);
+
+ if (attempts == 0) {
+ attempts++;
+ slab_collect();
+ goto retry;
+ }
+
+ printf_once("no more room for kmem_alloc_aligned in %p\n", map);
return kr;
}
@@ -665,10 +609,10 @@ kmem_alloc_aligned(map, addrp, size)
*/
kern_return_t
-kmem_alloc_pageable(map, addrp, size)
- vm_map_t map;
- vm_offset_t *addrp;
- vm_size_t size;
+kmem_alloc_pageable(
+ vm_map_t map,
+ vm_offset_t *addrp,
+ vm_size_t size)
{
vm_offset_t addr;
kern_return_t kr;
@@ -696,10 +640,10 @@ kmem_alloc_pageable(map, addrp, size)
*/
void
-kmem_free(map, addr, size)
- vm_map_t map;
- vm_offset_t addr;
- vm_size_t size;
+kmem_free(
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_size_t size)
{
kern_return_t kr;
@@ -714,11 +658,12 @@ kmem_free(map, addr, size)
* a submap.
*/
void
-kmem_alloc_pages(object, offset, start, end, protection)
- register vm_object_t object;
- register vm_offset_t offset;
- register vm_offset_t start, end;
- vm_prot_t protection;
+kmem_alloc_pages(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t protection)
{
/*
* Mark the pmap region as not pageable.
@@ -726,7 +671,7 @@ kmem_alloc_pages(object, offset, start, end, protection)
pmap_pageable(kernel_pmap, start, end, FALSE);
while (start < end) {
- register vm_page_t mem;
+ vm_page_t mem;
vm_object_lock(object);
@@ -769,11 +714,12 @@ kmem_alloc_pages(object, offset, start, end, protection)
* a submap.
*/
void
-kmem_remap_pages(object, offset, start, end, protection)
- register vm_object_t object;
- register vm_offset_t offset;
- register vm_offset_t start, end;
- vm_prot_t protection;
+kmem_remap_pages(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t protection)
{
/*
* Mark the pmap region as not pageable.
@@ -781,7 +727,7 @@ kmem_remap_pages(object, offset, start, end, protection)
pmap_pageable(kernel_pmap, start, end, FALSE);
while (start < end) {
- register vm_page_t mem;
+ vm_page_t mem;
vm_object_lock(object);
@@ -827,11 +773,13 @@ kmem_remap_pages(object, offset, start, end, protection)
*/
void
-kmem_submap(map, parent, min, max, size, pageable)
- vm_map_t map, parent;
- vm_offset_t *min, *max;
- vm_size_t size;
- boolean_t pageable;
+kmem_submap(
+ vm_map_t map,
+ vm_map_t parent,
+ vm_offset_t *min,
+ vm_offset_t *max,
+ vm_size_t size,
+ boolean_t pageable)
{
vm_offset_t addr;
kern_return_t kr;
@@ -845,7 +793,7 @@ kmem_submap(map, parent, min, max, size, pageable)
*/
vm_object_reference(vm_submap_object);
- addr = (vm_offset_t) vm_map_min(parent);
+ addr = vm_map_min(parent);
kr = vm_map_enter(parent, &addr, size,
(vm_offset_t) 0, TRUE,
vm_submap_object, (vm_offset_t) 0, FALSE,
@@ -869,9 +817,9 @@ kmem_submap(map, parent, min, max, size, pageable)
* Initialize the kernel's virtual memory map, taking
* into account all memory allocated up to this time.
*/
-void kmem_init(start, end)
- vm_offset_t start;
- vm_offset_t end;
+void kmem_init(
+ vm_offset_t start,
+ vm_offset_t end)
{
vm_map_setup(kernel_map, pmap_kernel(), VM_MIN_KERNEL_ADDRESS, end,
FALSE);
@@ -879,7 +827,6 @@ void kmem_init(start, end)
/*
* Reserve virtual memory allocated up to this time.
*/
-
if (start != VM_MIN_KERNEL_ADDRESS) {
kern_return_t rc;
vm_offset_t addr = VM_MIN_KERNEL_ADDRESS;
@@ -890,7 +837,7 @@ void kmem_init(start, end)
VM_PROT_DEFAULT, VM_PROT_ALL,
VM_INHERIT_DEFAULT);
if (rc)
- panic("%s:%d: vm_map_enter failed (%d)\n", rc);
+ panic("vm_map_enter failed (%d)\n", rc);
}
}
@@ -907,21 +854,19 @@ void kmem_init(start, end)
*/
kern_return_t
-kmem_io_map_copyout(map, addr, alloc_addr, alloc_size, copy, min_size)
- vm_map_t map;
- vm_offset_t *addr; /* actual addr of data */
- vm_offset_t *alloc_addr; /* page aligned addr */
- vm_size_t *alloc_size; /* size allocated */
- vm_map_copy_t copy;
- vm_size_t min_size; /* Do at least this much */
+kmem_io_map_copyout(
+ vm_map_t map,
+ vm_offset_t *addr, /* actual addr of data */
+ vm_offset_t *alloc_addr, /* page aligned addr */
+ vm_size_t *alloc_size, /* size allocated */
+ vm_map_copy_t copy,
+ vm_size_t min_size) /* Do at least this much */
{
vm_offset_t myaddr, offset;
vm_size_t mysize, copy_size;
kern_return_t ret;
- register
vm_page_t *page_list;
vm_map_copy_t new_copy;
- register
int i;
assert(copy->type == VM_MAP_COPY_PAGE_LIST);
@@ -1013,10 +958,10 @@ kmem_io_map_copyout(map, addr, alloc_addr, alloc_size, copy, min_size)
*/
void
-kmem_io_map_deallocate(map, addr, size)
- vm_map_t map;
- vm_offset_t addr;
- vm_size_t size;
+kmem_io_map_deallocate(
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_size_t size)
{
/*
* Remove the mappings. The pmap_remove is needed.
@@ -1035,10 +980,11 @@ kmem_io_map_deallocate(map, addr, size)
* and the kernel map/submaps.
*/
-int copyinmap(map, fromaddr, toaddr, length)
- vm_map_t map;
- char *fromaddr, *toaddr;
- int length;
+int copyinmap(
+ vm_map_t map,
+ char *fromaddr,
+ char *toaddr,
+ int length)
{
if (vm_map_pmap(map) == kernel_pmap) {
/* assume a correct copy */
@@ -1061,10 +1007,11 @@ int copyinmap(map, fromaddr, toaddr, length)
* and the kernel map/submaps.
*/
-int copyoutmap(map, fromaddr, toaddr, length)
- vm_map_t map;
- char *fromaddr, *toaddr;
- int length;
+int copyoutmap(
+ vm_map_t map,
+ char *fromaddr,
+ char *toaddr,
+ int length)
{
if (vm_map_pmap(map) == kernel_pmap) {
/* assume a correct copy */
diff --git a/vm/vm_kern.h b/vm/vm_kern.h
index 22b7c123..fb8ac7f8 100644
--- a/vm/vm_kern.h
+++ b/vm/vm_kern.h
@@ -54,8 +54,6 @@ extern kern_return_t kmem_alloc_pageable(vm_map_t, vm_offset_t *,
vm_size_t);
extern kern_return_t kmem_alloc_wired(vm_map_t, vm_offset_t *, vm_size_t);
extern kern_return_t kmem_alloc_aligned(vm_map_t, vm_offset_t *, vm_size_t);
-extern kern_return_t kmem_realloc(vm_map_t, vm_offset_t, vm_size_t,
- vm_offset_t *, vm_size_t);
extern void kmem_free(vm_map_t, vm_offset_t, vm_size_t);
extern void kmem_submap(vm_map_t, vm_map_t, vm_offset_t *,
@@ -82,4 +80,18 @@ extern boolean_t projected_buffer_in_range(
vm_offset_t start,
vm_offset_t end);
+extern void kmem_alloc_pages(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t protection);
+
+extern void kmem_remap_pages(
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t protection);
+
#endif /* _VM_VM_KERN_H_ */
diff --git a/vm/vm_map.c b/vm/vm_map.c
index 2be71471..89a2b382 100644
--- a/vm/vm_map.c
+++ b/vm/vm_map.c
@@ -58,20 +58,6 @@
#include <vm/vm_print.h>
#endif /* MACH_KDB */
-
-/* Forward declarations */
-kern_return_t vm_map_delete(
- vm_map_t map,
- vm_offset_t start,
- vm_offset_t end);
-
-kern_return_t vm_map_copyout_page_list(
- vm_map_t dst_map,
- vm_offset_t *dst_addr, /* OUT */
- vm_map_copy_t copy);
-
-void vm_map_copy_page_discard (vm_map_copy_t copy);
-
/*
* Macros to copy a vm_map_entry. We must be careful to correctly
* manage the wired page count. vm_map_entry_copy() creates a new
@@ -140,11 +126,8 @@ MACRO_END
struct kmem_cache vm_map_cache; /* cache for vm_map structures */
struct kmem_cache vm_map_entry_cache; /* cache for vm_map_entry structures */
-struct kmem_cache vm_map_kentry_cache; /* cache for kernel entry structures */
struct kmem_cache vm_map_copy_cache; /* cache for vm_map_copy structures */
-boolean_t vm_map_lookup_entry(); /* forward declaration */
-
/*
* Placeholder object for submap operations. This object is dropped
* into the range by a call to vm_map_find, and removed when
@@ -163,58 +146,36 @@ vm_object_t vm_submap_object = &vm_submap_object_store;
* Map and entry structures are allocated from caches -- we must
* initialize those caches.
*
- * There are three caches of interest:
+ * There are two caches of interest:
*
* vm_map_cache: used to allocate maps.
* vm_map_entry_cache: used to allocate map entries.
- * vm_map_kentry_cache: used to allocate map entries for the kernel.
*
- * Kernel map entries are allocated from a special cache, using a custom
- * page allocation function to avoid recursion. It would be difficult
- * (perhaps impossible) for the kernel to allocate more memory to an entry
- * cache when it became empty since the very act of allocating memory
- * implies the creation of a new entry.
+ * We make sure the map entry cache allocates memory directly from the
+ * physical allocator to avoid recursion with this module.
*/
-vm_offset_t kentry_data;
-vm_size_t kentry_data_size = KENTRY_DATA_SIZE;
-
-static vm_offset_t kentry_pagealloc(vm_size_t size)
-{
- vm_offset_t result;
-
- if (size > kentry_data_size)
- panic("vm_map: kentry memory exhausted");
-
- result = kentry_data;
- kentry_data += size;
- kentry_data_size -= size;
- return result;
-}
-
void vm_map_init(void)
{
kmem_cache_init(&vm_map_cache, "vm_map", sizeof(struct vm_map), 0,
- NULL, NULL, NULL, 0);
+ NULL, 0);
kmem_cache_init(&vm_map_entry_cache, "vm_map_entry",
- sizeof(struct vm_map_entry), 0, NULL, NULL, NULL, 0);
- kmem_cache_init(&vm_map_kentry_cache, "vm_map_kentry",
- sizeof(struct vm_map_entry), 0, NULL, kentry_pagealloc,
- NULL, KMEM_CACHE_NOCPUPOOL | KMEM_CACHE_NOOFFSLAB
- | KMEM_CACHE_NORECLAIM);
+ sizeof(struct vm_map_entry), 0, NULL,
+ KMEM_CACHE_NOOFFSLAB | KMEM_CACHE_PHYSMEM);
kmem_cache_init(&vm_map_copy_cache, "vm_map_copy",
- sizeof(struct vm_map_copy), 0, NULL, NULL, NULL, 0);
+ sizeof(struct vm_map_copy), 0, NULL, 0);
/*
* Submap object is initialized by vm_object_init.
*/
}
-void vm_map_setup(map, pmap, min, max, pageable)
- vm_map_t map;
- pmap_t pmap;
- vm_offset_t min, max;
- boolean_t pageable;
+void vm_map_setup(
+ vm_map_t map,
+ pmap_t pmap,
+ vm_offset_t min,
+ vm_offset_t max,
+ boolean_t pageable)
{
vm_map_first_entry(map) = vm_map_to_entry(map);
vm_map_last_entry(map) = vm_map_to_entry(map);
@@ -223,6 +184,7 @@ void vm_map_setup(map, pmap, min, max, pageable)
rbtree_init(&map->hdr.tree);
map->size = 0;
+ map->user_wired = 0;
map->ref_count = 1;
map->pmap = pmap;
map->min_offset = min;
@@ -243,12 +205,13 @@ void vm_map_setup(map, pmap, min, max, pageable)
* the given physical map structure, and having
* the given lower and upper address bounds.
*/
-vm_map_t vm_map_create(pmap, min, max, pageable)
- pmap_t pmap;
- vm_offset_t min, max;
- boolean_t pageable;
+vm_map_t vm_map_create(
+ pmap_t pmap,
+ vm_offset_t min,
+ vm_offset_t max,
+ boolean_t pageable)
{
- register vm_map_t result;
+ vm_map_t result;
result = (vm_map_t) kmem_cache_alloc(&vm_map_cache);
if (result == VM_MAP_NULL)
@@ -272,17 +235,11 @@ vm_map_t vm_map_create(pmap, min, max, pageable)
_vm_map_entry_create(&(copy)->cpy_hdr)
vm_map_entry_t _vm_map_entry_create(map_header)
- register struct vm_map_header *map_header;
+ const struct vm_map_header *map_header;
{
- register kmem_cache_t cache;
- register vm_map_entry_t entry;
-
- if (map_header->entries_pageable)
- cache = &vm_map_entry_cache;
- else
- cache = &vm_map_kentry_cache;
+ vm_map_entry_t entry;
- entry = (vm_map_entry_t) kmem_cache_alloc(cache);
+ entry = (vm_map_entry_t) kmem_cache_alloc(&vm_map_entry_cache);
if (entry == VM_MAP_ENTRY_NULL)
panic("vm_map_entry_create");
@@ -301,17 +258,12 @@ vm_map_entry_t _vm_map_entry_create(map_header)
_vm_map_entry_dispose(&(copy)->cpy_hdr, (entry))
void _vm_map_entry_dispose(map_header, entry)
- register struct vm_map_header *map_header;
- register vm_map_entry_t entry;
+ const struct vm_map_header *map_header;
+ vm_map_entry_t entry;
{
- register kmem_cache_t cache;
-
- if (map_header->entries_pageable)
- cache = &vm_map_entry_cache;
- else
- cache = &vm_map_kentry_cache;
+ (void)map_header;
- kmem_cache_free(cache, (vm_offset_t) entry);
+ kmem_cache_free(&vm_map_entry_cache, (vm_offset_t) entry);
}
/*
@@ -386,8 +338,7 @@ static inline int vm_map_entry_cmp_insert(const struct rbtree_node *a,
* Creates another valid reference to the given map.
*
*/
-void vm_map_reference(map)
- register vm_map_t map;
+void vm_map_reference(vm_map_t map)
{
if (map == VM_MAP_NULL)
return;
@@ -404,10 +355,9 @@ void vm_map_reference(map)
* destroying it if no references remain.
* The map should not be locked.
*/
-void vm_map_deallocate(map)
- register vm_map_t map;
+void vm_map_deallocate(vm_map_t map)
{
- register int c;
+ int c;
if (map == VM_MAP_NULL)
return;
@@ -449,13 +399,13 @@ void vm_map_deallocate(map)
* result indicates whether the address is
* actually contained in the map.
*/
-boolean_t vm_map_lookup_entry(map, address, entry)
- register vm_map_t map;
- register vm_offset_t address;
- vm_map_entry_t *entry; /* OUT */
+boolean_t vm_map_lookup_entry(
+ vm_map_t map,
+ vm_offset_t address,
+ vm_map_entry_t *entry) /* OUT */
{
- register struct rbtree_node *node;
- register vm_map_entry_t hint;
+ struct rbtree_node *node;
+ vm_map_entry_t hint;
/*
* First, make a quick check to see if we are already
@@ -506,10 +456,11 @@ boolean_t vm_map_lookup_entry(map, address, entry)
*/
boolean_t
-invalid_user_access(map, start, end, prot)
- vm_map_t map;
- vm_offset_t start, end;
- vm_prot_t prot;
+invalid_user_access(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t prot)
{
vm_map_entry_t entry;
@@ -533,17 +484,17 @@ invalid_user_access(map, start, end, prot)
* are initialized to zero. If an object is supplied,
* then an existing entry may be extended.
*/
-kern_return_t vm_map_find_entry(map, address, size, mask, object, o_entry)
- register vm_map_t map;
- vm_offset_t *address; /* OUT */
- vm_size_t size;
- vm_offset_t mask;
- vm_object_t object;
- vm_map_entry_t *o_entry; /* OUT */
+kern_return_t vm_map_find_entry(
+ vm_map_t map,
+ vm_offset_t *address, /* OUT */
+ vm_size_t size,
+ vm_offset_t mask,
+ vm_object_t object,
+ vm_map_entry_t *o_entry) /* OUT */
{
- register vm_map_entry_t entry, new_entry;
- register vm_offset_t start;
- register vm_offset_t end;
+ vm_map_entry_t entry, new_entry;
+ vm_offset_t start;
+ vm_offset_t end;
/*
* Look for the first possible address;
@@ -562,7 +513,7 @@ kern_return_t vm_map_find_entry(map, address, size, mask, object, o_entry)
*/
while (TRUE) {
- register vm_map_entry_t next;
+ vm_map_entry_t next;
/*
* Find the end of the proposed new region.
@@ -687,8 +638,8 @@ kern_return_t vm_map_find_entry(map, address, size, mask, object, o_entry)
return(KERN_SUCCESS);
}
-int vm_map_pmap_enter_print = FALSE;
-int vm_map_pmap_enter_enable = FALSE;
+boolean_t vm_map_pmap_enter_print = FALSE;
+boolean_t vm_map_pmap_enter_enable = FALSE;
/*
* Routine: vm_map_pmap_enter
@@ -705,19 +656,16 @@ int vm_map_pmap_enter_enable = FALSE;
* The source map should not be locked on entry.
*/
void
-vm_map_pmap_enter(map, addr, end_addr, object, offset, protection)
- vm_map_t map;
- register
- vm_offset_t addr;
- register
- vm_offset_t end_addr;
- register
- vm_object_t object;
- vm_offset_t offset;
- vm_prot_t protection;
+vm_map_pmap_enter(
+ vm_map_t map,
+ vm_offset_t addr,
+ vm_offset_t end_addr,
+ vm_object_t object,
+ vm_offset_t offset,
+ vm_prot_t protection)
{
while (addr < end_addr) {
- register vm_page_t m;
+ vm_page_t m;
vm_object_lock(object);
vm_object_paging_begin(object);
@@ -766,27 +714,22 @@ vm_map_pmap_enter(map, addr, end_addr, object, offset, protection)
* Arguments are as defined in the vm_map call.
*/
kern_return_t vm_map_enter(
- map,
- address, size, mask, anywhere,
- object, offset, needs_copy,
- cur_protection, max_protection, inheritance)
- register
- vm_map_t map;
- vm_offset_t *address; /* IN/OUT */
- vm_size_t size;
- vm_offset_t mask;
- boolean_t anywhere;
- vm_object_t object;
- vm_offset_t offset;
- boolean_t needs_copy;
- vm_prot_t cur_protection;
- vm_prot_t max_protection;
- vm_inherit_t inheritance;
+ vm_map_t map,
+ vm_offset_t *address, /* IN/OUT */
+ vm_size_t size,
+ vm_offset_t mask,
+ boolean_t anywhere,
+ vm_object_t object,
+ vm_offset_t offset,
+ boolean_t needs_copy,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance)
{
- register vm_map_entry_t entry;
- register vm_offset_t start;
- register vm_offset_t end;
- kern_return_t result = KERN_SUCCESS;
+ vm_map_entry_t entry;
+ vm_offset_t start;
+ vm_offset_t end;
+ kern_return_t result = KERN_SUCCESS;
#define RETURN(value) { result = value; goto BailOut; }
@@ -832,7 +775,7 @@ kern_return_t vm_map_enter(
*/
while (TRUE) {
- register vm_map_entry_t next;
+ vm_map_entry_t next;
/*
* Find the end of the proposed new region.
@@ -980,7 +923,7 @@ kern_return_t vm_map_enter(
*/
/**/ {
- register vm_map_entry_t new_entry;
+ vm_map_entry_t new_entry;
new_entry = vm_map_entry_create(map);
@@ -1051,14 +994,12 @@ kern_return_t vm_map_enter(
* the specified address; if necessary,
* it splits the entry into two.
*/
-void _vm_map_clip_start();
#define vm_map_clip_start(map, entry, startaddr) \
MACRO_BEGIN \
if ((startaddr) > (entry)->vme_start) \
_vm_map_clip_start(&(map)->hdr,(entry),(startaddr)); \
MACRO_END
-void _vm_map_copy_clip_start();
#define vm_map_copy_clip_start(copy, entry, startaddr) \
MACRO_BEGIN \
if ((startaddr) > (entry)->vme_start) \
@@ -1069,12 +1010,12 @@ void _vm_map_copy_clip_start();
* This routine is called only when it is known that
* the entry must be split.
*/
-void _vm_map_clip_start(map_header, entry, start)
- register struct vm_map_header *map_header;
- register vm_map_entry_t entry;
- register vm_offset_t start;
+void _vm_map_clip_start(
+ struct vm_map_header *map_header,
+ vm_map_entry_t entry,
+ vm_offset_t start)
{
- register vm_map_entry_t new_entry;
+ vm_map_entry_t new_entry;
/*
* Split off the front portion --
@@ -1106,14 +1047,12 @@ void _vm_map_clip_start(map_header, entry, start)
* the specified address; if necessary,
* it splits the entry into two.
*/
-void _vm_map_clip_end();
#define vm_map_clip_end(map, entry, endaddr) \
MACRO_BEGIN \
if ((endaddr) < (entry)->vme_end) \
_vm_map_clip_end(&(map)->hdr,(entry),(endaddr)); \
MACRO_END
-void _vm_map_copy_clip_end();
#define vm_map_copy_clip_end(copy, entry, endaddr) \
MACRO_BEGIN \
if ((endaddr) < (entry)->vme_end) \
@@ -1124,12 +1063,12 @@ void _vm_map_copy_clip_end();
* This routine is called only when it is known that
* the entry must be split.
*/
-void _vm_map_clip_end(map_header, entry, end)
- register struct vm_map_header *map_header;
- register vm_map_entry_t entry;
- register vm_offset_t end;
+void _vm_map_clip_end(
+ struct vm_map_header *map_header,
+ vm_map_entry_t entry,
+ vm_offset_t end)
{
- register vm_map_entry_t new_entry;
+ vm_map_entry_t new_entry;
/*
* Create a new entry and insert it
@@ -1184,15 +1123,15 @@ void _vm_map_clip_end(map_header, entry, end)
* range from the superior map, and then destroy the
* submap (if desired). [Better yet, don't try it.]
*/
-kern_return_t vm_map_submap(map, start, end, submap)
- register vm_map_t map;
- register vm_offset_t start;
- register vm_offset_t end;
- vm_map_t submap;
+kern_return_t vm_map_submap(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_map_t submap)
{
vm_map_entry_t entry;
- register kern_return_t result = KERN_INVALID_ARGUMENT;
- register vm_object_t object;
+ kern_return_t result = KERN_INVALID_ARGUMENT;
+ vm_object_t object;
vm_map_lock(map);
@@ -1232,15 +1171,15 @@ kern_return_t vm_map_submap(map, start, end, submap)
* specified, the maximum protection is to be set;
* otherwise, only the current protection is affected.
*/
-kern_return_t vm_map_protect(map, start, end, new_prot, set_max)
- register vm_map_t map;
- register vm_offset_t start;
- register vm_offset_t end;
- register vm_prot_t new_prot;
- register boolean_t set_max;
+kern_return_t vm_map_protect(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t new_prot,
+ boolean_t set_max)
{
- register vm_map_entry_t current;
- vm_map_entry_t entry;
+ vm_map_entry_t current;
+ vm_map_entry_t entry;
vm_map_lock(map);
@@ -1320,13 +1259,13 @@ kern_return_t vm_map_protect(map, start, end, new_prot, set_max)
* affects how the map will be shared with
* child maps at the time of vm_map_fork.
*/
-kern_return_t vm_map_inherit(map, start, end, new_inheritance)
- register vm_map_t map;
- register vm_offset_t start;
- register vm_offset_t end;
- register vm_inherit_t new_inheritance;
+kern_return_t vm_map_inherit(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_inherit_t new_inheritance)
{
- register vm_map_entry_t entry;
+ vm_map_entry_t entry;
vm_map_entry_t temp_entry;
vm_map_lock(map);
@@ -1369,14 +1308,14 @@ kern_return_t vm_map_inherit(map, start, end, new_inheritance)
* Callers should use macros in vm/vm_map.h (i.e. vm_map_pageable,
* or vm_map_pageable_user); don't call vm_map_pageable directly.
*/
-kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
- register vm_map_t map;
- register vm_offset_t start;
- register vm_offset_t end;
- register vm_prot_t access_type;
- boolean_t user_wire;
+kern_return_t vm_map_pageable_common(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end,
+ vm_prot_t access_type,
+ boolean_t user_wire)
{
- register vm_map_entry_t entry;
+ vm_map_entry_t entry;
vm_map_entry_t start_entry;
vm_map_lock(map);
@@ -1436,7 +1375,10 @@ kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
if (user_wire) {
if (--(entry->user_wired_count) == 0)
+ {
+ map->user_wired -= entry->vme_end - entry->vme_start;
entry->wired_count--;
+ }
}
else {
entry->wired_count--;
@@ -1513,7 +1455,10 @@ kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
if (user_wire) {
if ((entry->user_wired_count)++ == 0)
+ {
+ map->user_wired += entry->vme_end - entry->vme_start;
entry->wired_count++;
+ }
}
else {
entry->wired_count++;
@@ -1539,7 +1484,10 @@ kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
(entry->vme_end > start)) {
if (user_wire) {
if (--(entry->user_wired_count) == 0)
+ {
+ map->user_wired -= entry->vme_end - entry->vme_start;
entry->wired_count--;
+ }
}
else {
entry->wired_count--;
@@ -1618,12 +1566,12 @@ kern_return_t vm_map_pageable_common(map, start, end, access_type, user_wire)
*
* Deallocate the given entry from the target map.
*/
-void vm_map_entry_delete(map, entry)
- register vm_map_t map;
- register vm_map_entry_t entry;
+void vm_map_entry_delete(
+ vm_map_t map,
+ vm_map_entry_t entry)
{
- register vm_offset_t s, e;
- register vm_object_t object;
+ vm_offset_t s, e;
+ vm_object_t object;
extern vm_object_t kernel_object;
s = entry->vme_start;
@@ -1654,6 +1602,8 @@ void vm_map_entry_delete(map, entry)
if (entry->wired_count != 0) {
vm_fault_unwire(map, entry);
entry->wired_count = 0;
+ if (entry->user_wired_count)
+ map->user_wired -= entry->vme_end - entry->vme_start;
entry->user_wired_count = 0;
}
@@ -1702,10 +1652,10 @@ void vm_map_entry_delete(map, entry)
* map.
*/
-kern_return_t vm_map_delete(map, start, end)
- register vm_map_t map;
- register vm_offset_t start;
- register vm_offset_t end;
+kern_return_t vm_map_delete(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end)
{
vm_map_entry_t entry;
vm_map_entry_t first_entry;
@@ -1785,12 +1735,12 @@ kern_return_t vm_map_delete(map, start, end)
* Remove the given address range from the target map.
* This is the exported form of vm_map_delete.
*/
-kern_return_t vm_map_remove(map, start, end)
- register vm_map_t map;
- register vm_offset_t start;
- register vm_offset_t end;
+kern_return_t vm_map_remove(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end)
{
- register kern_return_t result;
+ kern_return_t result;
vm_map_lock(map);
VM_MAP_RANGE_CHECK(map, start, end);
@@ -1808,12 +1758,11 @@ kern_return_t vm_map_remove(map, start, end)
* that have not already been stolen.
*/
void
-vm_map_copy_steal_pages(copy)
-vm_map_copy_t copy;
+vm_map_copy_steal_pages(vm_map_copy_t copy)
{
- register vm_page_t m, new_m;
- register int i;
- vm_object_t object;
+ vm_page_t m, new_m;
+ int i;
+ vm_object_t object;
for (i = 0; i < copy->cpy_npages; i++) {
@@ -1855,8 +1804,7 @@ vm_map_copy_t copy;
* stolen, they are freed. If the pages are not stolen, they
* are unbusied, and associated state is cleaned up.
*/
-void vm_map_copy_page_discard(copy)
-vm_map_copy_t copy;
+void vm_map_copy_page_discard(vm_map_copy_t copy)
{
while (copy->cpy_npages > 0) {
vm_page_t m;
@@ -1901,8 +1849,7 @@ vm_map_copy_t copy;
* vm_map_copyin).
*/
void
-vm_map_copy_discard(copy)
- vm_map_copy_t copy;
+vm_map_copy_discard(vm_map_copy_t copy)
{
free_next_copy:
if (copy == VM_MAP_COPY_NULL)
@@ -1943,7 +1890,7 @@ free_next_copy:
* here to avoid tail recursion.
*/
if (copy->cpy_cont == vm_map_copy_discard_cont) {
- register vm_map_copy_t new_copy;
+ vm_map_copy_t new_copy;
new_copy = (vm_map_copy_t) copy->cpy_cont_args;
kmem_cache_free(&vm_map_copy_cache, (vm_offset_t) copy);
@@ -1978,8 +1925,7 @@ free_next_copy:
* deallocation will not fail.
*/
vm_map_copy_t
-vm_map_copy_copy(copy)
- vm_map_copy_t copy;
+vm_map_copy_copy(vm_map_copy_t copy)
{
vm_map_copy_t new_copy;
@@ -2025,9 +1971,9 @@ vm_map_copy_copy(copy)
* A version of vm_map_copy_discard that can be called
* as a continuation from a vm_map_copy page list.
*/
-kern_return_t vm_map_copy_discard_cont(cont_args, copy_result)
-vm_map_copyin_args_t cont_args;
-vm_map_copy_t *copy_result; /* OUT */
+kern_return_t vm_map_copy_discard_cont(
+vm_map_copyin_args_t cont_args,
+vm_map_copy_t *copy_result) /* OUT */
{
vm_map_copy_discard((vm_map_copy_t) cont_args);
if (copy_result != (vm_map_copy_t *)0)
@@ -2082,11 +2028,11 @@ vm_map_copy_t *copy_result; /* OUT */
* atomically and interruptibly, an error indication is
* returned.
*/
-kern_return_t vm_map_copy_overwrite(dst_map, dst_addr, copy, interruptible)
- vm_map_t dst_map;
- vm_offset_t dst_addr;
- vm_map_copy_t copy;
- boolean_t interruptible;
+kern_return_t vm_map_copy_overwrite(
+ vm_map_t dst_map,
+ vm_offset_t dst_addr,
+ vm_map_copy_t copy,
+ boolean_t interruptible)
{
vm_size_t size;
vm_offset_t start;
@@ -2305,6 +2251,8 @@ start_pass_1:
entry->offset = copy_entry->offset;
entry->needs_copy = copy_entry->needs_copy;
entry->wired_count = 0;
+ if (entry->user_wired_count)
+ dst_map->user_wired -= entry->vme_end - entry->vme_start;
entry->user_wired_count = 0;
vm_map_copy_entry_unlink(copy, copy_entry);
@@ -2459,19 +2407,16 @@ start_pass_1:
* If successful, consumes the copy object.
* Otherwise, the caller is responsible for it.
*/
-kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
- register
- vm_map_t dst_map;
- vm_offset_t *dst_addr; /* OUT */
- register
- vm_map_copy_t copy;
+kern_return_t vm_map_copyout(
+ vm_map_t dst_map,
+ vm_offset_t *dst_addr, /* OUT */
+ vm_map_copy_t copy)
{
vm_size_t size;
vm_size_t adjustment;
vm_offset_t start;
vm_offset_t vm_copy_start;
vm_map_entry_t last;
- register
vm_map_entry_t entry;
/*
@@ -2559,15 +2504,8 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
* Mismatches occur when dealing with the default
* pager.
*/
- kmem_cache_t old_cache;
vm_map_entry_t next, new;
- /*
- * Find the cache that the copies were allocated from
- */
- old_cache = (copy->cpy_hdr.entries_pageable)
- ? &vm_map_entry_cache
- : &vm_map_kentry_cache;
entry = vm_map_copy_first_entry(copy);
/*
@@ -2576,6 +2514,7 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
*/
copy->cpy_hdr.nentries = 0;
copy->cpy_hdr.entries_pageable = dst_map->hdr.entries_pageable;
+ rbtree_init(&copy->cpy_hdr.tree);
vm_map_copy_first_entry(copy) =
vm_map_copy_last_entry(copy) =
vm_map_copy_to_entry(copy);
@@ -2590,7 +2529,7 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
vm_map_copy_last_entry(copy),
new);
next = entry->vme_next;
- kmem_cache_free(old_cache, (vm_offset_t) entry);
+ kmem_cache_free(&vm_map_entry_cache, (vm_offset_t) entry);
entry = next;
}
}
@@ -2617,9 +2556,9 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
* map the pages into the destination map.
*/
if (entry->wired_count != 0) {
- register vm_offset_t va;
- vm_offset_t offset;
- register vm_object_t object;
+ vm_offset_t va;
+ vm_offset_t offset;
+ vm_object_t object;
object = entry->object.vm_object;
offset = entry->offset;
@@ -2631,7 +2570,7 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
TRUE);
while (va < entry->vme_end) {
- register vm_page_t m;
+ vm_page_t m;
/*
* Look up the page in the object.
@@ -2716,19 +2655,16 @@ kern_return_t vm_map_copyout(dst_map, dst_addr, copy)
* Version of vm_map_copyout() for page list vm map copies.
*
*/
-kern_return_t vm_map_copyout_page_list(dst_map, dst_addr, copy)
- register
- vm_map_t dst_map;
- vm_offset_t *dst_addr; /* OUT */
- register
- vm_map_copy_t copy;
+kern_return_t vm_map_copyout_page_list(
+ vm_map_t dst_map,
+ vm_offset_t *dst_addr, /* OUT */
+ vm_map_copy_t copy)
{
vm_size_t size;
vm_offset_t start;
vm_offset_t end;
vm_offset_t offset;
vm_map_entry_t last;
- register
vm_object_t object;
vm_page_t *page_list, m;
vm_map_entry_t entry;
@@ -2906,6 +2842,7 @@ create_object:
if (must_wire) {
entry->wired_count = 1;
+ dst_map->user_wired += entry->vme_end - entry->vme_start;
entry->user_wired_count = 1;
} else {
entry->wired_count = 0;
@@ -3106,12 +3043,12 @@ error:
* In/out conditions:
* The source map should not be locked on entry.
*/
-kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
- vm_map_t src_map;
- vm_offset_t src_addr;
- vm_size_t len;
- boolean_t src_destroy;
- vm_map_copy_t *copy_result; /* OUT */
+kern_return_t vm_map_copyin(
+ vm_map_t src_map,
+ vm_offset_t src_addr,
+ vm_size_t len,
+ boolean_t src_destroy,
+ vm_map_copy_t *copy_result) /* OUT */
{
vm_map_entry_t tmp_entry; /* Result of last map lookup --
* in multi-level lookup, this
@@ -3125,7 +3062,6 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
vm_offset_t src_end; /* End of entire region to be
* copied */
- register
vm_map_copy_t copy; /* Resulting copy */
/*
@@ -3192,14 +3128,12 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
*/
while (TRUE) {
- register
vm_map_entry_t src_entry = tmp_entry; /* Top-level entry */
vm_size_t src_size; /* Size of source
* map entry (in both
* maps)
*/
- register
vm_object_t src_object; /* Object to copy */
vm_offset_t src_offset;
@@ -3208,7 +3142,6 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
* for copy-on-write?
*/
- register
vm_map_entry_t new_entry; /* Map entry for copy */
boolean_t new_entry_needs_copy; /* Will new entry be COW? */
@@ -3472,11 +3405,11 @@ kern_return_t vm_map_copyin(src_map, src_addr, len, src_destroy, copy_result)
* Our caller donates an object reference.
*/
-kern_return_t vm_map_copyin_object(object, offset, size, copy_result)
- vm_object_t object;
- vm_offset_t offset; /* offset of region in object */
- vm_size_t size; /* size of region in object */
- vm_map_copy_t *copy_result; /* OUT */
+kern_return_t vm_map_copyin_object(
+ vm_object_t object,
+ vm_offset_t offset, /* offset of region in object */
+ vm_size_t size, /* size of region in object */
+ vm_map_copy_t *copy_result) /* OUT */
{
vm_map_copy_t copy; /* Resulting copy */
@@ -3517,12 +3450,12 @@ kern_return_t vm_map_copyin_object(object, offset, size, copy_result)
* the scheduler.
*/
-kern_return_t vm_map_copyin_page_list_cont(cont_args, copy_result)
-vm_map_copyin_args_t cont_args;
-vm_map_copy_t *copy_result; /* OUT */
+kern_return_t vm_map_copyin_page_list_cont(
+ vm_map_copyin_args_t cont_args,
+ vm_map_copy_t *copy_result) /* OUT */
{
kern_return_t result = 0; /* '=0' to quiet gcc warnings */
- register boolean_t do_abort, src_destroy, src_destroy_only;
+ boolean_t do_abort, src_destroy, src_destroy_only;
/*
* Check for cases that only require memory destruction.
@@ -3573,27 +3506,23 @@ vm_map_copy_t *copy_result; /* OUT */
* the recipient of this copy_result must be prepared to deal with it.
*/
-kern_return_t vm_map_copyin_page_list(src_map, src_addr, len, src_destroy,
- steal_pages, copy_result, is_cont)
- vm_map_t src_map;
- vm_offset_t src_addr;
- vm_size_t len;
- boolean_t src_destroy;
- boolean_t steal_pages;
- vm_map_copy_t *copy_result; /* OUT */
- boolean_t is_cont;
+kern_return_t vm_map_copyin_page_list(
+ vm_map_t src_map,
+ vm_offset_t src_addr,
+ vm_size_t len,
+ boolean_t src_destroy,
+ boolean_t steal_pages,
+ vm_map_copy_t *copy_result, /* OUT */
+ boolean_t is_cont)
{
vm_map_entry_t src_entry;
vm_page_t m;
vm_offset_t src_start;
vm_offset_t src_end;
vm_size_t src_size;
- register
vm_object_t src_object;
- register
vm_offset_t src_offset;
vm_offset_t src_last_offset;
- register
vm_map_copy_t copy; /* Resulting copy */
kern_return_t result = KERN_SUCCESS;
boolean_t need_map_lookup;
@@ -3927,7 +3856,7 @@ retry:
*/
src_start = trunc_page(src_addr);
if (steal_pages) {
- register int i;
+ int i;
vm_offset_t unwire_end;
unwire_end = src_start;
@@ -3999,6 +3928,8 @@ retry:
assert(src_entry->wired_count > 0);
src_entry->wired_count = 0;
+ if (src_entry->user_wired_count)
+ src_map->user_wired -= src_entry->vme_end - src_entry->vme_start;
src_entry->user_wired_count = 0;
unwire_end = src_entry->vme_end;
pmap_pageable(vm_map_pmap(src_map),
@@ -4104,18 +4035,14 @@ error:
*
* The source map must not be locked.
*/
-vm_map_t vm_map_fork(old_map)
- vm_map_t old_map;
+vm_map_t vm_map_fork(vm_map_t old_map)
{
vm_map_t new_map;
- register
vm_map_entry_t old_entry;
- register
vm_map_entry_t new_entry;
pmap_t new_pmap = pmap_create((vm_size_t) 0);
vm_size_t new_size = 0;
vm_size_t entry_size;
- register
vm_object_t object;
vm_map_lock(old_map);
@@ -4378,21 +4305,20 @@ vm_map_t vm_map_fork(old_map)
* copying operations, although the data referenced will
* remain the same.
*/
-kern_return_t vm_map_lookup(var_map, vaddr, fault_type, out_version,
- object, offset, out_prot, wired)
- vm_map_t *var_map; /* IN/OUT */
- register vm_offset_t vaddr;
- register vm_prot_t fault_type;
-
- vm_map_version_t *out_version; /* OUT */
- vm_object_t *object; /* OUT */
- vm_offset_t *offset; /* OUT */
- vm_prot_t *out_prot; /* OUT */
- boolean_t *wired; /* OUT */
+kern_return_t vm_map_lookup(
+ vm_map_t *var_map, /* IN/OUT */
+ vm_offset_t vaddr,
+ vm_prot_t fault_type,
+
+ vm_map_version_t *out_version, /* OUT */
+ vm_object_t *object, /* OUT */
+ vm_offset_t *offset, /* OUT */
+ vm_prot_t *out_prot, /* OUT */
+ boolean_t *wired) /* OUT */
{
- register vm_map_entry_t entry;
- register vm_map_t map = *var_map;
- register vm_prot_t prot;
+ vm_map_entry_t entry;
+ vm_map_t map = *var_map;
+ vm_prot_t prot;
RetryLookup: ;
@@ -4560,11 +4486,9 @@ kern_return_t vm_map_lookup(var_map, vaddr, fault_type, out_version,
* since the given version. If successful, the map
* will not change until vm_map_verify_done() is called.
*/
-boolean_t vm_map_verify(map, version)
- register
- vm_map_t map;
- register
- vm_map_version_t *version; /* REF */
+boolean_t vm_map_verify(
+ vm_map_t map,
+ vm_map_version_t *version) /* REF */
{
boolean_t result;
@@ -4593,24 +4517,19 @@ boolean_t vm_map_verify(map, version)
* a task's address map.
*/
-kern_return_t vm_region(map, address, size,
- protection, max_protection,
- inheritance, is_shared,
- object_name, offset_in_object)
- vm_map_t map;
- vm_offset_t *address; /* IN/OUT */
- vm_size_t *size; /* OUT */
- vm_prot_t *protection; /* OUT */
- vm_prot_t *max_protection; /* OUT */
- vm_inherit_t *inheritance; /* OUT */
- boolean_t *is_shared; /* OUT */
- ipc_port_t *object_name; /* OUT */
- vm_offset_t *offset_in_object; /* OUT */
+kern_return_t vm_region(
+ vm_map_t map,
+ vm_offset_t *address, /* IN/OUT */
+ vm_size_t *size, /* OUT */
+ vm_prot_t *protection, /* OUT */
+ vm_prot_t *max_protection, /* OUT */
+ vm_inherit_t *inheritance, /* OUT */
+ boolean_t *is_shared, /* OUT */
+ ipc_port_t *object_name, /* OUT */
+ vm_offset_t *offset_in_object) /* OUT */
{
vm_map_entry_t tmp_entry;
- register
vm_map_entry_t entry;
- register
vm_offset_t tmp_offset;
vm_offset_t start;
@@ -4667,9 +4586,9 @@ kern_return_t vm_region(map, address, size,
* at allocation time because the adjacent entry
* is often wired down.
*/
-void vm_map_simplify(map, start)
- vm_map_t map;
- vm_offset_t start;
+void vm_map_simplify(
+ vm_map_t map,
+ vm_offset_t start)
{
vm_map_entry_t this_entry;
vm_map_entry_t prev_entry;
@@ -4728,12 +4647,12 @@ void vm_map_simplify(map, start)
* it itself. [This assumes that attributes do not
* need to be inherited, which seems ok to me]
*/
-kern_return_t vm_map_machine_attribute(map, address, size, attribute, value)
- vm_map_t map;
- vm_offset_t address;
- vm_size_t size;
- vm_machine_attribute_t attribute;
- vm_machine_attribute_val_t* value; /* IN/OUT */
+kern_return_t vm_map_machine_attribute(
+ vm_map_t map,
+ vm_offset_t address,
+ vm_size_t size,
+ vm_machine_attribute_t attribute,
+ vm_machine_attribute_val_t* value) /* IN/OUT */
{
kern_return_t ret;
@@ -4758,25 +4677,30 @@ kern_return_t vm_map_machine_attribute(map, address, size, attribute, value)
/*
* vm_map_print: [ debug ]
*/
-void vm_map_print(map)
- register vm_map_t map;
+void vm_map_print(db_expr_t addr, boolean_t have_addr, db_expr_t count, const char *modif)
{
- register vm_map_entry_t entry;
+ vm_map_t map;
+ vm_map_entry_t entry;
+
+ if (!have_addr)
+ map = current_thread()->task->map;
+ else
+ map = (vm_map_t)addr;
- iprintf("Task map 0x%X: pmap=0x%X,",
+ iprintf("Map 0x%X: pmap=0x%X,",
(vm_offset_t) map, (vm_offset_t) (map->pmap));
printf("ref=%d,nentries=%d,", map->ref_count, map->hdr.nentries);
printf("version=%d\n", map->timestamp);
- indent += 2;
+ indent += 1;
for (entry = vm_map_first_entry(map);
entry != vm_map_to_entry(map);
entry = entry->vme_next) {
static char *inheritance_name[3] = { "share", "copy", "none"};
iprintf("map entry 0x%X: ", (vm_offset_t) entry);
- printf("start=0x%X, end=0x%X, ",
+ printf("start=0x%X, end=0x%X\n",
(vm_offset_t) entry->vme_start, (vm_offset_t) entry->vme_end);
- printf("prot=%X/%X/%s, ",
+ iprintf("prot=%X/%X/%s, ",
entry->protection,
entry->max_protection,
inheritance_name[entry->inheritance]);
@@ -4811,13 +4735,13 @@ void vm_map_print(map)
if ((entry->vme_prev == vm_map_to_entry(map)) ||
(entry->vme_prev->object.vm_object != entry->object.vm_object)) {
- indent += 2;
+ indent += 1;
vm_object_print(entry->object.vm_object);
- indent -= 2;
+ indent -= 1;
}
}
}
- indent -= 2;
+ indent -= 1;
}
/*
@@ -4827,13 +4751,13 @@ void vm_map_print(map)
*/
void vm_map_copy_print(copy)
- vm_map_copy_t copy;
+ const vm_map_copy_t copy;
{
int i, npages;
printf("copy object 0x%x\n", copy);
- indent += 2;
+ indent += 1;
iprintf("type=%d", copy->type);
switch (copy->type) {
@@ -4887,6 +4811,6 @@ void vm_map_copy_print(copy)
break;
}
- indent -=2;
+ indent -= 1;
}
#endif /* MACH_KDB */
diff --git a/vm/vm_map.h b/vm/vm_map.h
index 5fdac4e6..b4ba7c7b 100644
--- a/vm/vm_map.h
+++ b/vm/vm_map.h
@@ -52,10 +52,10 @@
#include <vm/vm_types.h>
#include <kern/lock.h>
#include <kern/rbtree.h>
-#include <kern/macro_help.h>
+#include <kern/macros.h>
/* TODO: make it dynamic */
-#define KENTRY_DATA_SIZE (64*PAGE_SIZE)
+#define KENTRY_DATA_SIZE (256*PAGE_SIZE)
/*
* Types defined:
@@ -170,14 +170,18 @@ struct vm_map {
#define max_offset hdr.links.end /* end of range */
pmap_t pmap; /* Physical map */
vm_size_t size; /* virtual size */
+ vm_size_t user_wired; /* wired by user size */
int ref_count; /* Reference count */
decl_simple_lock_data(, ref_lock) /* Lock for ref_count field */
vm_map_entry_t hint; /* hint for quick lookups */
decl_simple_lock_data(, hint_lock) /* lock for hint storage */
vm_map_entry_t first_free; /* First free space hint */
- boolean_t wait_for_space; /* Should callers wait
+
+ /* Flags */
+ unsigned int wait_for_space:1, /* Should callers wait
for space? */
- boolean_t wiring_required;/* All memory wired? */
+ /* boolean_t */ wiring_required:1; /* All memory wired? */
+
unsigned int timestamp; /* Version number */
};
@@ -359,9 +363,6 @@ MACRO_END
* Exported procedures that operate on vm_map_t.
*/
-extern vm_offset_t kentry_data;
-extern vm_size_t kentry_data_size;
-extern int kentry_count;
/* Initialize the module */
extern void vm_map_init(void);
@@ -437,6 +438,23 @@ extern kern_return_t vm_map_machine_attribute(vm_map_t, vm_offset_t,
/* Delete entry from map */
extern void vm_map_entry_delete(vm_map_t, vm_map_entry_t);
+kern_return_t vm_map_delete(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_offset_t end);
+
+kern_return_t vm_map_copyout_page_list(
+ vm_map_t dst_map,
+ vm_offset_t *dst_addr, /* OUT */
+ vm_map_copy_t copy);
+
+void vm_map_copy_page_discard (vm_map_copy_t copy);
+
+boolean_t vm_map_lookup_entry(
+ vm_map_t map,
+ vm_offset_t address,
+ vm_map_entry_t *entry); /* OUT */
+
/*
* Functions implemented as macros
*/
@@ -538,6 +556,9 @@ extern void _vm_map_clip_start(
* the specified address; if necessary,
* it splits the entry into two.
*/
-void _vm_map_clip_end();
+void _vm_map_clip_end(
+ struct vm_map_header *map_header,
+ vm_map_entry_t entry,
+ vm_offset_t end);
#endif /* _VM_VM_MAP_H_ */
diff --git a/vm/vm_object.c b/vm/vm_object.c
index 526b6f33..bc301288 100644
--- a/vm/vm_object.c
+++ b/vm/vm_object.c
@@ -59,7 +59,6 @@
#include <ddb/db_output.h>
#endif /* MACH_KDB */
-
void memory_object_release(
ipc_port_t pager,
pager_request_t pager_request,
@@ -231,9 +230,11 @@ static void _vm_object_setup(
vm_object_t _vm_object_allocate(
vm_size_t size)
{
- register vm_object_t object;
+ vm_object_t object;
object = (vm_object_t) kmem_cache_alloc(&vm_object_cache);
+ if (!object)
+ return 0;
_vm_object_setup(object, size);
@@ -243,10 +244,12 @@ vm_object_t _vm_object_allocate(
vm_object_t vm_object_allocate(
vm_size_t size)
{
- register vm_object_t object;
- register ipc_port_t port;
+ vm_object_t object;
+ ipc_port_t port;
object = _vm_object_allocate(size);
+ if (object == 0)
+ panic("vm_object_allocate");
port = ipc_port_alloc_kernel();
if (port == IP_NULL)
panic("vm_object_allocate");
@@ -264,7 +267,7 @@ vm_object_t vm_object_allocate(
void vm_object_bootstrap(void)
{
kmem_cache_init(&vm_object_cache, "vm_object",
- sizeof(struct vm_object), 0, NULL, NULL, NULL, 0);
+ sizeof(struct vm_object), 0, NULL, 0);
queue_init(&vm_object_cached_list);
simple_lock_init(&vm_object_cached_lock_data);
@@ -406,7 +409,7 @@ void vm_object_collect(
* Gets another reference to the given object.
*/
void vm_object_reference(
- register vm_object_t object)
+ vm_object_t object)
{
if (object == VM_OBJECT_NULL)
return;
@@ -429,7 +432,7 @@ void vm_object_reference(
* No object may be locked.
*/
void vm_object_deallocate(
- register vm_object_t object)
+ vm_object_t object)
{
vm_object_t temp;
@@ -525,10 +528,10 @@ void vm_object_deallocate(
* object will cease to exist.
*/
void vm_object_terminate(
- register vm_object_t object)
+ vm_object_t object)
{
- register vm_page_t p;
- vm_object_t shadow_object;
+ vm_page_t p;
+ vm_object_t shadow_object;
/*
* Make sure the object isn't already being terminated
@@ -577,10 +580,6 @@ void vm_object_terminate(
VM_PAGE_CHECK(p);
- if (p->busy && !p->absent)
- panic("vm_object_terminate.2 0x%x 0x%x",
- object, p);
-
VM_PAGE_FREE(p);
}
} else while (!queue_empty(&object->memq)) {
@@ -588,9 +587,6 @@ void vm_object_terminate(
VM_PAGE_CHECK(p);
- if (p->busy && !p->absent)
- panic("vm_object_terminate.3 0x%x 0x%x", object, p);
-
vm_page_lock_queues();
VM_PAGE_QUEUES_REMOVE(p);
vm_page_unlock_queues();
@@ -608,9 +604,6 @@ void vm_object_terminate(
goto free_page;
}
- if (p->fictitious)
- panic("vm_object_terminate.4 0x%x 0x%x", object, p);
-
if (!p->dirty)
p->dirty = pmap_is_modified(p->phys_addr);
@@ -732,7 +725,6 @@ void memory_object_release(
void vm_object_abort_activity(
vm_object_t object)
{
- register
vm_page_t p;
vm_page_t next;
@@ -786,17 +778,12 @@ void vm_object_abort_activity(
* or from port destruction handling (via vm_object_destroy).
*/
kern_return_t memory_object_destroy(
- register
vm_object_t object,
kern_return_t reason)
{
ipc_port_t old_object, old_name;
pager_request_t old_control;
-#ifdef lint
- reason++;
-#endif /* lint */
-
if (object == VM_OBJECT_NULL)
return KERN_SUCCESS;
@@ -889,8 +876,8 @@ kern_return_t memory_object_destroy(
boolean_t vm_object_pmap_protect_by_page = FALSE;
void vm_object_pmap_protect(
- register vm_object_t object,
- register vm_offset_t offset,
+ vm_object_t object,
+ vm_offset_t offset,
vm_size_t size,
pmap_t pmap,
vm_offset_t pmap_start,
@@ -912,8 +899,8 @@ void vm_object_pmap_protect(
}
{
- register vm_page_t p;
- register vm_offset_t end;
+ vm_page_t p;
+ vm_offset_t end;
end = offset + size;
@@ -944,7 +931,7 @@ void vm_object_pmap_protect(
* Must follow shadow chain to remove access
* to pages in shadowed objects.
*/
- register vm_object_t next_object;
+ vm_object_t next_object;
next_object = object->shadow;
if (next_object != VM_OBJECT_NULL) {
@@ -981,11 +968,11 @@ void vm_object_pmap_protect(
* The object must *not* be locked.
*/
void vm_object_pmap_remove(
- register vm_object_t object,
- register vm_offset_t start,
- register vm_offset_t end)
+ vm_object_t object,
+ vm_offset_t start,
+ vm_offset_t end)
{
- register vm_page_t p;
+ vm_page_t p;
if (object == VM_OBJECT_NULL)
return;
@@ -1031,7 +1018,6 @@ void vm_object_pmap_remove(
* VM_OBJECT_NULL.
*/
kern_return_t vm_object_copy_slowly(
- register
vm_object_t src_object,
vm_offset_t src_offset,
vm_size_t size,
@@ -1085,7 +1071,6 @@ kern_return_t vm_object_copy_slowly(
vm_prot_t prot = VM_PROT_READ;
vm_page_t _result_page;
vm_page_t top_page;
- register
vm_page_t result_page;
vm_object_lock(src_object);
@@ -1205,8 +1190,6 @@ kern_return_t vm_object_copy_slowly(
* The object should be unlocked on entry and exit.
*/
-vm_object_t vm_object_copy_delayed(); /* forward declaration */
-
boolean_t vm_object_copy_temporary(
vm_object_t *_object, /* INOUT */
vm_offset_t *_offset, /* INOUT */
@@ -1215,10 +1198,6 @@ boolean_t vm_object_copy_temporary(
{
vm_object_t object = *_object;
-#ifdef lint
- ++*_offset;
-#endif /* lint */
-
if (object == VM_OBJECT_NULL) {
*_src_needs_copy = FALSE;
*_dst_needs_copy = FALSE;
@@ -1318,16 +1297,6 @@ kern_return_t vm_object_copy_call(
vm_page_t p;
/*
- * Set the backing object for the new
- * temporary object.
- */
-
- assert(src_object->ref_count > 0);
- src_object->ref_count++;
- vm_object_paging_begin(src_object);
- vm_object_unlock(src_object);
-
- /*
* Create a memory object port to be associated
* with this new vm_object.
*
@@ -1340,10 +1309,18 @@ kern_return_t vm_object_copy_call(
*/
new_memory_object = ipc_port_alloc_kernel();
- if (new_memory_object == IP_NULL) {
- panic("vm_object_copy_call: allocate memory object port");
- /* XXX Shouldn't panic here. */
- }
+ if (new_memory_object == IP_NULL)
+ return KERN_RESOURCE_SHORTAGE;
+
+ /*
+ * Set the backing object for the new
+ * temporary object.
+ */
+
+ assert(src_object->ref_count > 0);
+ src_object->ref_count++;
+ vm_object_paging_begin(src_object);
+ vm_object_unlock(src_object);
/* we hold a naked receive right for new_memory_object */
(void) ipc_port_make_send(new_memory_object);
@@ -1448,7 +1425,7 @@ vm_object_t vm_object_copy_delayed(
* synchronization required in the "push"
* operation described above.
*
- * The copy-on-write is said to be assymetric because
+ * The copy-on-write is said to be asymmetric because
* the original object is *not* marked copy-on-write.
* A copied page is pushed to the copy object, regardless
* which party attempted to modify the page.
@@ -1581,7 +1558,6 @@ vm_object_t vm_object_copy_delayed(
* and may be interrupted.
*/
kern_return_t vm_object_copy_strategically(
- register
vm_object_t src_object,
vm_offset_t src_offset,
vm_size_t size,
@@ -1694,8 +1670,8 @@ void vm_object_shadow(
vm_offset_t *offset, /* IN/OUT */
vm_size_t length)
{
- register vm_object_t source;
- register vm_object_t result;
+ vm_object_t source;
+ vm_object_t result;
source = *object;
@@ -1955,7 +1931,6 @@ vm_object_t vm_object_enter(
vm_size_t size,
boolean_t internal)
{
- register
vm_object_t object;
vm_object_t new_object;
boolean_t must_init;
@@ -2169,7 +2144,6 @@ restart:
* daemon will be using this routine.
*/
void vm_object_pager_create(
- register
vm_object_t object)
{
ipc_port_t pager;
@@ -2314,14 +2288,14 @@ boolean_t vm_object_collapse_bypass_allowed = TRUE;
* so the caller should hold a reference for the object.
*/
void vm_object_collapse(
- register vm_object_t object)
+ vm_object_t object)
{
- register vm_object_t backing_object;
- register vm_offset_t backing_offset;
- register vm_size_t size;
- register vm_offset_t new_offset;
- register vm_page_t p, pp;
- ipc_port_t old_name_port;
+ vm_object_t backing_object;
+ vm_offset_t backing_offset;
+ vm_size_t size;
+ vm_offset_t new_offset;
+ vm_page_t p, pp;
+ ipc_port_t old_name_port;
if (!vm_object_collapse_allowed)
return;
@@ -2446,34 +2420,9 @@ void vm_object_collapse(
VM_PAGE_FREE(p);
}
else {
- if (pp != VM_PAGE_NULL) {
- /*
- * Parent has an absent page...
- * it's not being paged in, so
- * it must really be missing from
- * the parent.
- *
- * Throw out the absent page...
- * any faults looking for that
- * page will restart with the new
- * one.
- */
-
- /*
- * This should never happen -- the
- * parent cannot have ever had an
- * external memory object, and thus
- * cannot have absent pages.
- */
- panic("vm_object_collapse: bad case");
-
- VM_PAGE_FREE(pp);
-
- /*
- * Fall through to move the backing
- * object's page up.
- */
- }
+ assert(pp == VM_PAGE_NULL || !
+ "vm_object_collapse: bad case");
+
/*
* Parent now has no page.
* Move the backing object's page up.
@@ -2692,11 +2641,11 @@ unsigned int vm_object_page_remove_lookup = 0;
unsigned int vm_object_page_remove_iterate = 0;
void vm_object_page_remove(
- register vm_object_t object,
- register vm_offset_t start,
- register vm_offset_t end)
+ vm_object_t object,
+ vm_offset_t start,
+ vm_offset_t end)
{
- register vm_page_t p, next;
+ vm_page_t p, next;
/*
* One and two page removals are most popular.
@@ -2757,7 +2706,7 @@ void vm_object_page_remove(
*/
boolean_t vm_object_coalesce(
- register vm_object_t prev_object,
+ vm_object_t prev_object,
vm_object_t next_object,
vm_offset_t prev_offset,
vm_offset_t next_offset,
@@ -2766,10 +2715,6 @@ boolean_t vm_object_coalesce(
{
vm_size_t newsize;
-#ifdef lint
- next_offset++;
-#endif /* lint */
-
if (next_object != VM_OBJECT_NULL) {
return FALSE;
}
@@ -2898,7 +2843,8 @@ vm_object_page_map(
VM_PAGE_FREE(old_page);
}
- vm_page_init(m, addr);
+ vm_page_init(m);
+ m->phys_addr = addr;
m->private = TRUE; /* don`t free page */
m->wire_count = 1;
vm_page_lock_queues();
@@ -2923,20 +2869,21 @@ boolean_t vm_object_print_pages = FALSE;
void vm_object_print(
vm_object_t object)
{
- register vm_page_t p;
+ vm_page_t p;
- register int count;
+ int count;
if (object == VM_OBJECT_NULL)
return;
- iprintf("Object 0x%X: size=0x%X",
- (vm_offset_t) object, (vm_offset_t) object->size);
- printf(", %d references, %lu resident pages,", object->ref_count,
- object->resident_page_count);
+ iprintf("Object 0x%X: size=0x%X, %d references",
+ (vm_offset_t) object, (vm_offset_t) object->size,
+ object->ref_count);
+ printf("\n");
+ iprintf("%lu resident pages,", object->resident_page_count);
printf(" %d absent pages,", object->absent_count);
printf(" %d paging ops\n", object->paging_in_progress);
- indent += 2;
+ indent += 1;
iprintf("memory object=0x%X (offset=0x%X),",
(vm_offset_t) object->pager, (vm_offset_t) object->paging_offset);
printf("control=0x%X, name=0x%X\n",
@@ -2955,7 +2902,7 @@ void vm_object_print(
(vm_offset_t) object->shadow, (vm_offset_t) object->shadow_offset);
printf("copy=0x%X\n", (vm_offset_t) object->copy);
- indent += 2;
+ indent += 1;
if (vm_object_print_pages) {
count = 0;
@@ -2972,7 +2919,7 @@ void vm_object_print(
if (count != 0)
printf("\n");
}
- indent -= 4;
+ indent -= 2;
}
#endif /* MACH_KDB */
diff --git a/vm/vm_object.h b/vm/vm_object.h
index 6b9f0bcf..eb8a0c28 100644
--- a/vm/vm_object.h
+++ b/vm/vm_object.h
@@ -45,7 +45,7 @@
#include <kern/lock.h>
#include <kern/assert.h>
#include <kern/debug.h>
-#include <kern/macro_help.h>
+#include <kern/macros.h>
#include <vm/pmap.h>
#include <ipc/ipc_types.h>
@@ -62,7 +62,7 @@ typedef struct ipc_port * pager_request_t;
*/
struct vm_object {
- queue_chain_t memq; /* Resident memory */
+ queue_head_t memq; /* Resident memory */
decl_simple_lock_data(, Lock) /* Synchronization */
#if VM_OBJECT_DEBUG
thread_t LockHolder; /* Thread holding Lock */
@@ -247,6 +247,16 @@ extern boolean_t vm_object_coalesce(
extern void vm_object_pager_wakeup(ipc_port_t pager);
+void memory_object_release(
+ ipc_port_t pager,
+ pager_request_t pager_request,
+ ipc_port_t pager_name);
+
+void vm_object_deactivate_pages(vm_object_t);
+
+vm_object_t vm_object_copy_delayed(
+ vm_object_t src_object);
+
/*
* Event waiting handling
*/
diff --git a/vm/vm_page.c b/vm/vm_page.c
new file mode 100644
index 00000000..a868fce8
--- /dev/null
+++ b/vm/vm_page.c
@@ -0,0 +1,782 @@
+/*
+ * Copyright (c) 2010-2014 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * This implementation uses the binary buddy system to manage its heap.
+ * Descriptions of the buddy system can be found in the following works:
+ * - "UNIX Internals: The New Frontiers", by Uresh Vahalia.
+ * - "Dynamic Storage Allocation: A Survey and Critical Review",
+ * by Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles.
+ *
+ * In addition, this allocator uses per-CPU pools of pages for order 0
+ * (i.e. single page) allocations. These pools act as caches (but are named
+ * differently to avoid confusion with CPU caches) that reduce contention on
+ * multiprocessor systems. When a pool is empty and cannot provide a page,
+ * it is filled by transferring multiple pages from the backend buddy system.
+ * The symmetric case is handled likewise.
+ */
+
+#include <string.h>
+#include <kern/assert.h>
+#include <kern/cpu_number.h>
+#include <kern/debug.h>
+#include <kern/list.h>
+#include <kern/lock.h>
+#include <kern/macros.h>
+#include <kern/printf.h>
+#include <kern/thread.h>
+#include <mach/vm_param.h>
+#include <machine/pmap.h>
+#include <sys/types.h>
+#include <vm/vm_page.h>
+
+#define __init
+#define __initdata
+#define __read_mostly
+
+#define thread_pin()
+#define thread_unpin()
+
+/*
+ * Number of free block lists per segment.
+ */
+#define VM_PAGE_NR_FREE_LISTS 11
+
+/*
+ * The size of a CPU pool is computed by dividing the number of pages in its
+ * containing segment by this value.
+ */
+#define VM_PAGE_CPU_POOL_RATIO 1024
+
+/*
+ * Maximum number of pages in a CPU pool.
+ */
+#define VM_PAGE_CPU_POOL_MAX_SIZE 128
+
+/*
+ * The transfer size of a CPU pool is computed by dividing the pool size by
+ * this value.
+ */
+#define VM_PAGE_CPU_POOL_TRANSFER_RATIO 2
+
+/*
+ * Per-processor cache of pages.
+ */
+struct vm_page_cpu_pool {
+ simple_lock_data_t lock;
+ int size;
+ int transfer_size;
+ int nr_pages;
+ struct list pages;
+} __aligned(CPU_L1_SIZE);
+
+/*
+ * Special order value for pages that aren't in a free list. Such pages are
+ * either allocated, or part of a free block of pages but not the head page.
+ */
+#define VM_PAGE_ORDER_UNLISTED ((unsigned short)-1)
+
+/*
+ * Doubly-linked list of free blocks.
+ */
+struct vm_page_free_list {
+ unsigned long size;
+ struct list blocks;
+};
+
+/*
+ * Segment name buffer size.
+ */
+#define VM_PAGE_NAME_SIZE 16
+
+/*
+ * Segment of contiguous memory.
+ */
+struct vm_page_seg {
+ struct vm_page_cpu_pool cpu_pools[NCPUS];
+
+ phys_addr_t start;
+ phys_addr_t end;
+ struct vm_page *pages;
+ struct vm_page *pages_end;
+ simple_lock_data_t lock;
+ struct vm_page_free_list free_lists[VM_PAGE_NR_FREE_LISTS];
+ unsigned long nr_free_pages;
+};
+
+/*
+ * Bootstrap information about a segment.
+ */
+struct vm_page_boot_seg {
+ phys_addr_t start;
+ phys_addr_t end;
+ phys_addr_t avail_start;
+ phys_addr_t avail_end;
+};
+
+static int vm_page_is_ready __read_mostly;
+
+/*
+ * Segment table.
+ *
+ * The system supports a maximum of 4 segments :
 * The system supports a maximum of 4 segments:
+ * - DMA: suitable for DMA
+ * - DMA32: suitable for DMA when devices support 32-bits addressing
+ * - DIRECTMAP: direct physical mapping, allows direct access from
+ * the kernel with a simple offset translation
+ * - HIGHMEM: must be mapped before it can be accessed
+ *
+ * Segments are ordered by priority, 0 being the lowest priority. Their
+ * relative priorities are DMA < DMA32 < DIRECTMAP < HIGHMEM. Some segments
+ * may actually be aliases for others, e.g. if DMA is always possible from
+ * the direct physical mapping, DMA and DMA32 are aliases for DIRECTMAP,
+ * in which case the segment table contains DIRECTMAP and HIGHMEM only.
+ */
+static struct vm_page_seg vm_page_segs[VM_PAGE_MAX_SEGS];
+
+/*
+ * Bootstrap segment table.
+ */
+static struct vm_page_boot_seg vm_page_boot_segs[VM_PAGE_MAX_SEGS] __initdata;
+
+/*
+ * Number of loaded segments.
+ */
+static unsigned int vm_page_segs_size __read_mostly;
+
+static void __init
+vm_page_init_pa(struct vm_page *page, unsigned short seg_index, phys_addr_t pa)
+{
+ memset(page, 0, sizeof(*page));
+ vm_page_init(page); /* vm_resident members */
+ page->type = VM_PT_RESERVED;
+ page->seg_index = seg_index;
+ page->order = VM_PAGE_ORDER_UNLISTED;
+ page->priv = NULL;
+ page->phys_addr = pa;
+}
+
+void
+vm_page_set_type(struct vm_page *page, unsigned int order, unsigned short type)
+{
+ unsigned int i, nr_pages;
+
+ nr_pages = 1 << order;
+
+ for (i = 0; i < nr_pages; i++)
+ page[i].type = type;
+}
+
+static void __init
+vm_page_free_list_init(struct vm_page_free_list *free_list)
+{
+ free_list->size = 0;
+ list_init(&free_list->blocks);
+}
+
+static inline void
+vm_page_free_list_insert(struct vm_page_free_list *free_list,
+ struct vm_page *page)
+{
+ assert(page->order == VM_PAGE_ORDER_UNLISTED);
+
+ free_list->size++;
+ list_insert_head(&free_list->blocks, &page->node);
+}
+
+static inline void
+vm_page_free_list_remove(struct vm_page_free_list *free_list,
+ struct vm_page *page)
+{
+ assert(page->order != VM_PAGE_ORDER_UNLISTED);
+
+ free_list->size--;
+ list_remove(&page->node);
+}
+
+static struct vm_page *
+vm_page_seg_alloc_from_buddy(struct vm_page_seg *seg, unsigned int order)
+{
+ struct vm_page_free_list *free_list = free_list;
+ struct vm_page *page, *buddy;
+ unsigned int i;
+
+ assert(order < VM_PAGE_NR_FREE_LISTS);
+
+ for (i = order; i < VM_PAGE_NR_FREE_LISTS; i++) {
+ free_list = &seg->free_lists[i];
+
+ if (free_list->size != 0)
+ break;
+ }
+
+ if (i == VM_PAGE_NR_FREE_LISTS)
+ return NULL;
+
+ page = list_first_entry(&free_list->blocks, struct vm_page, node);
+ vm_page_free_list_remove(free_list, page);
+ page->order = VM_PAGE_ORDER_UNLISTED;
+
+ while (i > order) {
+ i--;
+ buddy = &page[1 << i];
+ vm_page_free_list_insert(&seg->free_lists[i], buddy);
+ buddy->order = i;
+ }
+
+ seg->nr_free_pages -= (1 << order);
+ return page;
+}
+
+static void
+vm_page_seg_free_to_buddy(struct vm_page_seg *seg, struct vm_page *page,
+ unsigned int order)
+{
+ struct vm_page *buddy;
+ phys_addr_t pa, buddy_pa;
+ unsigned int nr_pages;
+
+ assert(page >= seg->pages);
+ assert(page < seg->pages_end);
+ assert(page->order == VM_PAGE_ORDER_UNLISTED);
+ assert(order < VM_PAGE_NR_FREE_LISTS);
+
+ nr_pages = (1 << order);
+ pa = page->phys_addr;
+
+ while (order < (VM_PAGE_NR_FREE_LISTS - 1)) {
+ buddy_pa = pa ^ vm_page_ptoa(1 << order);
+
+ if ((buddy_pa < seg->start) || (buddy_pa >= seg->end))
+ break;
+
+ buddy = &seg->pages[vm_page_atop(buddy_pa - seg->start)];
+
+ if (buddy->order != order)
+ break;
+
+ vm_page_free_list_remove(&seg->free_lists[order], buddy);
+ buddy->order = VM_PAGE_ORDER_UNLISTED;
+ order++;
+ pa &= -vm_page_ptoa(1 << order);
+ page = &seg->pages[vm_page_atop(pa - seg->start)];
+ }
+
+ vm_page_free_list_insert(&seg->free_lists[order], page);
+ page->order = order;
+ seg->nr_free_pages += nr_pages;
+}
+
+static void __init
+vm_page_cpu_pool_init(struct vm_page_cpu_pool *cpu_pool, int size)
+{
+ simple_lock_init(&cpu_pool->lock);
+ cpu_pool->size = size;
+ cpu_pool->transfer_size = (size + VM_PAGE_CPU_POOL_TRANSFER_RATIO - 1)
+ / VM_PAGE_CPU_POOL_TRANSFER_RATIO;
+ cpu_pool->nr_pages = 0;
+ list_init(&cpu_pool->pages);
+}
+
+static inline struct vm_page_cpu_pool *
+vm_page_cpu_pool_get(struct vm_page_seg *seg)
+{
+ return &seg->cpu_pools[cpu_number()];
+}
+
+static inline struct vm_page *
+vm_page_cpu_pool_pop(struct vm_page_cpu_pool *cpu_pool)
+{
+ struct vm_page *page;
+
+ assert(cpu_pool->nr_pages != 0);
+ cpu_pool->nr_pages--;
+ page = list_first_entry(&cpu_pool->pages, struct vm_page, node);
+ list_remove(&page->node);
+ return page;
+}
+
+static inline void
+vm_page_cpu_pool_push(struct vm_page_cpu_pool *cpu_pool, struct vm_page *page)
+{
+ assert(cpu_pool->nr_pages < cpu_pool->size);
+ cpu_pool->nr_pages++;
+ list_insert_head(&cpu_pool->pages, &page->node);
+}
+
+static int
+vm_page_cpu_pool_fill(struct vm_page_cpu_pool *cpu_pool,
+ struct vm_page_seg *seg)
+{
+ struct vm_page *page;
+ int i;
+
+ assert(cpu_pool->nr_pages == 0);
+
+ simple_lock(&seg->lock);
+
+ for (i = 0; i < cpu_pool->transfer_size; i++) {
+ page = vm_page_seg_alloc_from_buddy(seg, 0);
+
+ if (page == NULL)
+ break;
+
+ vm_page_cpu_pool_push(cpu_pool, page);
+ }
+
+ simple_unlock(&seg->lock);
+
+ return i;
+}
+
+static void
+vm_page_cpu_pool_drain(struct vm_page_cpu_pool *cpu_pool,
+ struct vm_page_seg *seg)
+{
+ struct vm_page *page;
+ int i;
+
+ assert(cpu_pool->nr_pages == cpu_pool->size);
+
+ simple_lock(&seg->lock);
+
+ for (i = cpu_pool->transfer_size; i > 0; i--) {
+ page = vm_page_cpu_pool_pop(cpu_pool);
+ vm_page_seg_free_to_buddy(seg, page, 0);
+ }
+
+ simple_unlock(&seg->lock);
+}
+
+static phys_addr_t __init
+vm_page_seg_size(struct vm_page_seg *seg)
+{
+ return seg->end - seg->start;
+}
+
+static int __init
+vm_page_seg_compute_pool_size(struct vm_page_seg *seg)
+{
+ phys_addr_t size;
+
+ size = vm_page_atop(vm_page_seg_size(seg)) / VM_PAGE_CPU_POOL_RATIO;
+
+ if (size == 0)
+ size = 1;
+ else if (size > VM_PAGE_CPU_POOL_MAX_SIZE)
+ size = VM_PAGE_CPU_POOL_MAX_SIZE;
+
+ return size;
+}
+
+static void __init
+vm_page_seg_init(struct vm_page_seg *seg, phys_addr_t start, phys_addr_t end,
+ struct vm_page *pages)
+{
+ phys_addr_t pa;
+ int pool_size;
+ unsigned int i;
+
+ seg->start = start;
+ seg->end = end;
+ pool_size = vm_page_seg_compute_pool_size(seg);
+
+ for (i = 0; i < ARRAY_SIZE(seg->cpu_pools); i++)
+ vm_page_cpu_pool_init(&seg->cpu_pools[i], pool_size);
+
+ seg->pages = pages;
+ seg->pages_end = pages + vm_page_atop(vm_page_seg_size(seg));
+ simple_lock_init(&seg->lock);
+
+ for (i = 0; i < ARRAY_SIZE(seg->free_lists); i++)
+ vm_page_free_list_init(&seg->free_lists[i]);
+
+ seg->nr_free_pages = 0;
+ i = seg - vm_page_segs;
+
+ for (pa = seg->start; pa < seg->end; pa += PAGE_SIZE)
+ vm_page_init_pa(&pages[vm_page_atop(pa - seg->start)], i, pa);
+}
+
+static struct vm_page *
+vm_page_seg_alloc(struct vm_page_seg *seg, unsigned int order,
+ unsigned short type)
+{
+ struct vm_page_cpu_pool *cpu_pool;
+ struct vm_page *page;
+ int filled;
+
+ assert(order < VM_PAGE_NR_FREE_LISTS);
+
+ if (order == 0) {
+ thread_pin();
+ cpu_pool = vm_page_cpu_pool_get(seg);
+ simple_lock(&cpu_pool->lock);
+
+ if (cpu_pool->nr_pages == 0) {
+ filled = vm_page_cpu_pool_fill(cpu_pool, seg);
+
+ if (!filled) {
+ simple_unlock(&cpu_pool->lock);
+ thread_unpin();
+ return NULL;
+ }
+ }
+
+ page = vm_page_cpu_pool_pop(cpu_pool);
+ simple_unlock(&cpu_pool->lock);
+ thread_unpin();
+ } else {
+ simple_lock(&seg->lock);
+ page = vm_page_seg_alloc_from_buddy(seg, order);
+ simple_unlock(&seg->lock);
+
+ if (page == NULL)
+ return NULL;
+ }
+
+ assert(page->type == VM_PT_FREE);
+ vm_page_set_type(page, order, type);
+ return page;
+}
+
+static void
+vm_page_seg_free(struct vm_page_seg *seg, struct vm_page *page,
+ unsigned int order)
+{
+ struct vm_page_cpu_pool *cpu_pool;
+
+ assert(page->type != VM_PT_FREE);
+ assert(order < VM_PAGE_NR_FREE_LISTS);
+
+ vm_page_set_type(page, order, VM_PT_FREE);
+
+ if (order == 0) {
+ thread_pin();
+ cpu_pool = vm_page_cpu_pool_get(seg);
+ simple_lock(&cpu_pool->lock);
+
+ if (cpu_pool->nr_pages == cpu_pool->size)
+ vm_page_cpu_pool_drain(cpu_pool, seg);
+
+ vm_page_cpu_pool_push(cpu_pool, page);
+ simple_unlock(&cpu_pool->lock);
+ thread_unpin();
+ } else {
+ simple_lock(&seg->lock);
+ vm_page_seg_free_to_buddy(seg, page, order);
+ simple_unlock(&seg->lock);
+ }
+}
+
+void __init
+vm_page_load(unsigned int seg_index, phys_addr_t start, phys_addr_t end,
+ phys_addr_t avail_start, phys_addr_t avail_end)
+{
+ struct vm_page_boot_seg *seg;
+
+ assert(seg_index < ARRAY_SIZE(vm_page_boot_segs));
+ assert(vm_page_aligned(start));
+ assert(vm_page_aligned(end));
+ assert(vm_page_aligned(avail_start));
+ assert(vm_page_aligned(avail_end));
+ assert(start < end);
+ assert(start <= avail_start);
+ assert(avail_end <= end);
+ assert(vm_page_segs_size < ARRAY_SIZE(vm_page_boot_segs));
+
+ seg = &vm_page_boot_segs[seg_index];
+ seg->start = start;
+ seg->end = end;
+ seg->avail_start = avail_start;
+ seg->avail_end = avail_end;
+ vm_page_segs_size++;
+}
+
+int
+vm_page_ready(void)
+{
+ return vm_page_is_ready;
+}
+
+static unsigned int
+vm_page_select_alloc_seg(unsigned int selector)
+{
+ unsigned int seg_index;
+
+ switch (selector) {
+ case VM_PAGE_SEL_DMA:
+ seg_index = VM_PAGE_SEG_DMA;
+ break;
+ case VM_PAGE_SEL_DMA32:
+ seg_index = VM_PAGE_SEG_DMA32;
+ break;
+ case VM_PAGE_SEL_DIRECTMAP:
+ seg_index = VM_PAGE_SEG_DIRECTMAP;
+ break;
+ case VM_PAGE_SEL_HIGHMEM:
+ seg_index = VM_PAGE_SEG_HIGHMEM;
+ break;
+ default:
+ panic("vm_page: invalid selector");
+ }
+
+ return MIN(vm_page_segs_size - 1, seg_index);
+}
+
+static int __init
+vm_page_boot_seg_loaded(const struct vm_page_boot_seg *seg)
+{
+ return (seg->end != 0);
+}
+
+static void __init
+vm_page_check_boot_segs(void)
+{
+ unsigned int i;
+ int expect_loaded;
+
+ if (vm_page_segs_size == 0)
+ panic("vm_page: no physical memory loaded");
+
+ for (i = 0; i < ARRAY_SIZE(vm_page_boot_segs); i++) {
+ expect_loaded = (i < vm_page_segs_size);
+
+ if (vm_page_boot_seg_loaded(&vm_page_boot_segs[i]) == expect_loaded)
+ continue;
+
+ panic("vm_page: invalid boot segment table");
+ }
+}
+
+static phys_addr_t __init
+vm_page_boot_seg_size(struct vm_page_boot_seg *seg)
+{
+ return seg->end - seg->start;
+}
+
+static phys_addr_t __init
+vm_page_boot_seg_avail_size(struct vm_page_boot_seg *seg)
+{
+ return seg->avail_end - seg->avail_start;
+}
+
+unsigned long __init
+vm_page_bootalloc(size_t size)
+{
+ struct vm_page_boot_seg *seg;
+ phys_addr_t pa;
+ unsigned int i;
+
+ for (i = vm_page_select_alloc_seg(VM_PAGE_SEL_DIRECTMAP);
+ i < vm_page_segs_size;
+ i--) {
+ seg = &vm_page_boot_segs[i];
+
+ if (size <= vm_page_boot_seg_avail_size(seg)) {
+ pa = seg->avail_start;
+ seg->avail_start += vm_page_round(size);
+ return pa;
+ }
+ }
+
+ panic("vm_page: no physical memory available");
+}
+
+void __init
+vm_page_setup(void)
+{
+ struct vm_page_boot_seg *boot_seg;
+ struct vm_page_seg *seg;
+ struct vm_page *table, *page, *end;
+ size_t nr_pages, table_size;
+ unsigned long va;
+ unsigned int i;
+ phys_addr_t pa;
+
+ vm_page_check_boot_segs();
+
+ /*
+ * Compute the page table size.
+ */
+ nr_pages = 0;
+
+ for (i = 0; i < vm_page_segs_size; i++)
+ nr_pages += vm_page_atop(vm_page_boot_seg_size(&vm_page_boot_segs[i]));
+
+ table_size = vm_page_round(nr_pages * sizeof(struct vm_page));
+ printf("vm_page: page table size: %lu entries (%luk)\n", nr_pages,
+ table_size >> 10);
+ table = (struct vm_page *)pmap_steal_memory(table_size);
+ va = (unsigned long)table;
+
+ /*
+ * Initialize the segments, associating them to the page table. When
+ * the segments are initialized, all their pages are set allocated.
+ * Pages are then released, which populates the free lists.
+ */
+ for (i = 0; i < vm_page_segs_size; i++) {
+ seg = &vm_page_segs[i];
+ boot_seg = &vm_page_boot_segs[i];
+ vm_page_seg_init(seg, boot_seg->start, boot_seg->end, table);
+ page = seg->pages + vm_page_atop(boot_seg->avail_start
+ - boot_seg->start);
+ end = seg->pages + vm_page_atop(boot_seg->avail_end
+ - boot_seg->start);
+
+ while (page < end) {
+ page->type = VM_PT_FREE;
+ vm_page_seg_free_to_buddy(seg, page, 0);
+ page++;
+ }
+
+ table += vm_page_atop(vm_page_seg_size(seg));
+ }
+
+ while (va < (unsigned long)table) {
+ pa = pmap_extract(kernel_pmap, va);
+ page = vm_page_lookup_pa(pa);
+ assert((page != NULL) && (page->type == VM_PT_RESERVED));
+ page->type = VM_PT_TABLE;
+ va += PAGE_SIZE;
+ }
+
+ vm_page_is_ready = 1;
+}
+
+void __init
+vm_page_manage(struct vm_page *page)
+{
+ assert(page->seg_index < ARRAY_SIZE(vm_page_segs));
+ assert(page->type == VM_PT_RESERVED);
+
+ vm_page_set_type(page, 0, VM_PT_FREE);
+ vm_page_seg_free_to_buddy(&vm_page_segs[page->seg_index], page, 0);
+}
+
+struct vm_page *
+vm_page_lookup_pa(phys_addr_t pa)
+{
+ struct vm_page_seg *seg;
+ unsigned int i;
+
+ for (i = 0; i < vm_page_segs_size; i++) {
+ seg = &vm_page_segs[i];
+
+ if ((pa >= seg->start) && (pa < seg->end))
+ return &seg->pages[vm_page_atop(pa - seg->start)];
+ }
+
+ return NULL;
+}
+
+struct vm_page *
+vm_page_alloc_pa(unsigned int order, unsigned int selector, unsigned short type)
+{
+ struct vm_page *page;
+ unsigned int i;
+
+ for (i = vm_page_select_alloc_seg(selector); i < vm_page_segs_size; i--) {
+ page = vm_page_seg_alloc(&vm_page_segs[i], order, type);
+
+ if (page != NULL)
+ return page;
+ }
+
+ if (type == VM_PT_PMAP)
+ panic("vm_page: unable to allocate pmap page");
+
+ return NULL;
+}
+
+void
+vm_page_free_pa(struct vm_page *page, unsigned int order)
+{
+ assert(page != NULL);
+ assert(page->seg_index < ARRAY_SIZE(vm_page_segs));
+
+ vm_page_seg_free(&vm_page_segs[page->seg_index], page, order);
+}
+
+const char *
+vm_page_seg_name(unsigned int seg_index)
+{
+ /* Don't use a switch statement since segments can be aliased */
+ if (seg_index == VM_PAGE_SEG_HIGHMEM)
+ return "HIGHMEM";
+ else if (seg_index == VM_PAGE_SEG_DIRECTMAP)
+ return "DIRECTMAP";
+ else if (seg_index == VM_PAGE_SEG_DMA32)
+ return "DMA32";
+ else if (seg_index == VM_PAGE_SEG_DMA)
+ return "DMA";
+ else
+ panic("vm_page: invalid segment index");
+}
+
+void
+vm_page_info_all(void)
+{
+ struct vm_page_seg *seg;
+ unsigned long pages;
+ unsigned int i;
+
+ for (i = 0; i < vm_page_segs_size; i++) {
+ seg = &vm_page_segs[i];
+ pages = (unsigned long)(seg->pages_end - seg->pages);
+ printf("vm_page: %s: pages: %lu (%luM), free: %lu (%luM)\n",
+ vm_page_seg_name(i), pages, pages >> (20 - PAGE_SHIFT),
+ seg->nr_free_pages, seg->nr_free_pages >> (20 - PAGE_SHIFT));
+ }
+}
+
+phys_addr_t
+vm_page_mem_size(void)
+{
+ phys_addr_t total;
+ unsigned int i;
+
+ total = 0;
+
+ for (i = 0; i < vm_page_segs_size; i++) {
+ /* XXX */
+ if (i > VM_PAGE_SEG_DIRECTMAP)
+ continue;
+
+ total += vm_page_seg_size(&vm_page_segs[i]);
+ }
+
+ return total;
+}
+
+unsigned long
+vm_page_mem_free(void)
+{
+ unsigned long total;
+ unsigned int i;
+
+ total = 0;
+
+ for (i = 0; i < vm_page_segs_size; i++) {
+ /* XXX */
+ if (i > VM_PAGE_SEG_DIRECTMAP)
+ continue;
+
+ total += vm_page_segs[i].nr_free_pages;
+ }
+
+ return total;
+}
diff --git a/vm/vm_page.h b/vm/vm_page.h
index 4536d1c5..f2e20a78 100644
--- a/vm/vm_page.h
+++ b/vm/vm_page.h
@@ -36,13 +36,14 @@
#include <mach/boolean.h>
#include <mach/vm_prot.h>
-#include <mach/vm_param.h>
+#include <machine/vm_param.h>
#include <vm/vm_object.h>
#include <vm/vm_types.h>
#include <kern/queue.h>
#include <kern/lock.h>
+#include <kern/log2.h>
-#include <kern/macro_help.h>
+#include <kern/macros.h>
#include <kern/sched_prim.h> /* definitions of wait/wakeup */
#if MACH_VM_DEBUG
@@ -76,6 +77,23 @@
*/
struct vm_page {
+ /* Members used in the vm_page module only */
+ struct list node;
+ unsigned short type;
+ unsigned short seg_index;
+ unsigned short order;
+ void *priv;
+
+ /*
+ * This member is used throughout the code and may only change for
+ * fictitious pages.
+ */
+ phys_addr_t phys_addr;
+
+ /* We use an empty struct as the delimiter. */
+ struct {} vm_page_header;
+#define VM_PAGE_HEADER_SIZE offsetof(struct vm_page, vm_page_header)
+
queue_chain_t pageq; /* queue info for FIFO
* queue or free list (P) */
queue_chain_t listq; /* all pages in same object (O) */
@@ -84,7 +102,7 @@ struct vm_page {
vm_object_t object; /* which object am I in (O,P) */
vm_offset_t offset; /* offset into that object (O,P) */
- unsigned int wire_count:16, /* how many wired down maps use me?
+ unsigned int wire_count:15, /* how many wired down maps use me?
(O&P) */
/* boolean_t */ inactive:1, /* page is in inactive list (P) */
active:1, /* page is in active list (P) */
@@ -92,14 +110,8 @@ struct vm_page {
free:1, /* page is on free list (P) */
reference:1, /* page has been used (P) */
external:1, /* page considered external (P) */
- extcounted:1, /* page counted in ext counts (P) */
- :0; /* (force to 'long' boundary) */
-#ifdef ns32000
- int pad; /* extra space for ns32000 bit ops */
-#endif /* ns32000 */
-
- unsigned int
- /* boolean_t */ busy:1, /* page is in transit (O) */
+ extcounted:1, /* page counted in ext counts (P) */
+ busy:1, /* page is in transit (O) */
wanted:1, /* someone is waiting for page (O) */
tabled:1, /* page is in VP table (O) */
fictitious:1, /* Physical page doesn't exist (O) */
@@ -112,13 +124,10 @@ struct vm_page {
dirty:1, /* Page must be cleaned (O) */
precious:1, /* Page is precious; data must be
* returned even if clean (O) */
- overwriting:1, /* Request to unlock has been made
+ overwriting:1; /* Request to unlock has been made
* without having data. (O)
* [See vm_object_overwrite] */
- :0;
- vm_offset_t phys_addr; /* Physical address of page, passed
- * to pmap_enter (read-only) */
vm_prot_t page_lock; /* Uses prohibited by data manager (O) */
vm_prot_t unlock_request; /* Outstanding unlock request (O) */
};
@@ -147,8 +156,6 @@ struct vm_page {
*/
extern
-vm_page_t vm_page_queue_free; /* memory free queue */
-extern
vm_page_t vm_page_queue_fictitious; /* fictitious free queue */
extern
queue_head_t vm_page_queue_active; /* active memory queue */
@@ -156,13 +163,6 @@ extern
queue_head_t vm_page_queue_inactive; /* inactive memory queue */
extern
-vm_offset_t first_phys_addr; /* physical address for first_page */
-extern
-vm_offset_t last_phys_addr; /* physical address for last_page */
-
-extern
-int vm_page_free_count; /* How many pages are free? */
-extern
int vm_page_fictitious_count;/* How many fictitious pages are free? */
extern
int vm_page_active_count; /* How many pages are active? */
@@ -208,25 +208,21 @@ extern void vm_page_bootstrap(
vm_offset_t *endp);
extern void vm_page_module_init(void);
-extern void vm_page_create(
- vm_offset_t start,
- vm_offset_t end);
extern vm_page_t vm_page_lookup(
vm_object_t object,
vm_offset_t offset);
extern vm_page_t vm_page_grab_fictitious(void);
-extern void vm_page_release_fictitious(vm_page_t);
-extern boolean_t vm_page_convert(vm_page_t, boolean_t);
+extern boolean_t vm_page_convert(vm_page_t *, boolean_t);
extern void vm_page_more_fictitious(void);
extern vm_page_t vm_page_grab(boolean_t);
-extern void vm_page_release(vm_page_t, boolean_t);
+extern vm_page_t vm_page_grab_contig(vm_size_t, unsigned int);
+extern void vm_page_free_contig(vm_page_t, vm_size_t);
extern void vm_page_wait(void (*)(void));
extern vm_page_t vm_page_alloc(
vm_object_t object,
vm_offset_t offset);
extern void vm_page_init(
- vm_page_t mem,
- vm_offset_t phys_addr);
+ vm_page_t mem);
extern void vm_page_free(vm_page_t);
extern void vm_page_activate(vm_page_t);
extern void vm_page_deactivate(vm_page_t);
@@ -247,8 +243,6 @@ extern void vm_page_copy(vm_page_t src_m, vm_page_t dest_m);
extern void vm_page_wire(vm_page_t);
extern void vm_page_unwire(vm_page_t);
-extern void vm_set_page_size(void);
-
#if MACH_VM_DEBUG
extern unsigned int vm_page_info(
hash_info_bucket_t *info,
@@ -326,4 +320,217 @@ extern unsigned int vm_page_info(
} \
MACRO_END
+/*
+ * Copyright (c) 2010-2014 Richard Braun.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ *
+ * Physical page management.
+ */
+
+/*
+ * Address/page conversion and rounding macros (not inline functions to
+ * be easily usable on both virtual and physical addresses, which may not
+ * have the same type size).
+ */
+#define vm_page_atop(addr) ((addr) >> PAGE_SHIFT)
+#define vm_page_ptoa(page) ((page) << PAGE_SHIFT)
+#define vm_page_trunc(addr) P2ALIGN(addr, PAGE_SIZE)
+#define vm_page_round(addr) P2ROUND(addr, PAGE_SIZE)
+#define vm_page_aligned(addr) P2ALIGNED(addr, PAGE_SIZE)
+
+/*
+ * Segment selectors.
+ *
+ * Selector-to-segment-list translation table:
+ * DMA DMA
+ * DMA32 DMA32 DMA
+ * DIRECTMAP DIRECTMAP DMA32 DMA
+ * HIGHMEM HIGHMEM DIRECTMAP DMA32 DMA
+ */
+#define VM_PAGE_SEL_DMA 0
+#define VM_PAGE_SEL_DMA32 1
+#define VM_PAGE_SEL_DIRECTMAP 2
+#define VM_PAGE_SEL_HIGHMEM 3
+
+/*
+ * Page usage types.
+ *
+ * Failing to allocate pmap pages will cause a kernel panic.
+ * TODO Obviously, this needs to be addressed, e.g. with a reserved pool of
+ * pages.
+ */
+#define VM_PT_FREE 0 /* Page unused */
+#define VM_PT_RESERVED 1 /* Page reserved at boot time */
+#define VM_PT_TABLE 2 /* Page is part of the page table */
+#define VM_PT_PMAP 3 /* Page stores pmap-specific data */
+#define VM_PT_KMEM 4 /* Page is part of a kmem slab */
+#define VM_PT_STACK 5 /* Type for kernel thread stacks */
+#define VM_PT_KERNEL 6 /* Type for generic kernel allocations */
+
+static inline unsigned short
+vm_page_type(const struct vm_page *page)
+{
+ return page->type;
+}
+
+void vm_page_set_type(struct vm_page *page, unsigned int order,
+ unsigned short type);
+
+static inline unsigned int
+vm_page_order(size_t size)
+{
+ return iorder2(vm_page_atop(vm_page_round(size)));
+}
+
+static inline phys_addr_t
+vm_page_to_pa(const struct vm_page *page)
+{
+ return page->phys_addr;
+}
+
+#if 0
+static inline unsigned long
+vm_page_direct_va(phys_addr_t pa)
+{
+ assert(pa < VM_PAGE_DIRECTMAP_LIMIT);
+ return ((unsigned long)pa + VM_MIN_DIRECTMAP_ADDRESS);
+}
+
+static inline phys_addr_t
+vm_page_direct_pa(unsigned long va)
+{
+ assert(va >= VM_MIN_DIRECTMAP_ADDRESS);
+ assert(va < VM_MAX_DIRECTMAP_ADDRESS);
+ return (va - VM_MIN_DIRECTMAP_ADDRESS);
+}
+
+static inline void *
+vm_page_direct_ptr(const struct vm_page *page)
+{
+ return (void *)vm_page_direct_va(vm_page_to_pa(page));
+}
+#endif
+
+/*
+ * Associate private data with a page.
+ */
+static inline void
+vm_page_set_priv(struct vm_page *page, void *priv)
+{
+ page->priv = priv;
+}
+
+static inline void *
+vm_page_get_priv(const struct vm_page *page)
+{
+ return page->priv;
+}
+
+/*
+ * Load physical memory into the vm_page module at boot time.
+ *
+ * The avail_start and avail_end parameters are used to maintain a simple
+ * heap for bootstrap allocations.
+ *
+ * All addresses must be page-aligned. Segments can be loaded in any order.
+ */
+void vm_page_load(unsigned int seg_index, phys_addr_t start, phys_addr_t end,
+ phys_addr_t avail_start, phys_addr_t avail_end);
+
+/*
+ * Return true if the vm_page module is completely initialized, false
+ * otherwise, in which case only vm_page_bootalloc() can be used for
+ * allocations.
+ */
+int vm_page_ready(void);
+
+/*
+ * Early allocation function.
+ *
+ * This function is used by the vm_resident module to implement
+ * pmap_steal_memory. It can be used after physical segments have been loaded
+ * and before the vm_page module is initialized.
+ */
+unsigned long vm_page_bootalloc(size_t size);
+
+/*
+ * Set up the vm_page module.
+ *
+ * Architecture-specific code must have loaded segments before calling this
+ * function. Segments must comply with the selector-to-segment-list table,
+ * e.g. HIGHMEM is loaded if and only if DIRECTMAP, DMA32 and DMA are loaded,
+ * notwithstanding segment aliasing.
+ *
+ * Once this function returns, the vm_page module is ready, and normal
+ * allocation functions can be used.
+ */
+void vm_page_setup(void);
+
+/*
+ * Make the given page managed by the vm_page module.
+ *
+ * If additional memory can be made usable after the VM system is initialized,
+ * it should be reported through this function.
+ */
+void vm_page_manage(struct vm_page *page);
+
+/*
+ * Return the page descriptor for the given physical address.
+ */
+struct vm_page * vm_page_lookup_pa(phys_addr_t pa);
+
+/*
+ * Allocate a block of 2^order physical pages.
+ *
+ * The selector is used to determine the segments from which allocation can
+ * be attempted.
+ *
+ * This function should only be used by the vm_resident module.
+ */
+struct vm_page * vm_page_alloc_pa(unsigned int order, unsigned int selector,
+ unsigned short type);
+
+/*
+ * Release a block of 2^order physical pages.
+ *
+ * This function should only be used by the vm_resident module.
+ */
+void vm_page_free_pa(struct vm_page *page, unsigned int order);
+
+/*
+ * Return the name of the given segment.
+ */
+const char * vm_page_seg_name(unsigned int seg_index);
+
+/*
+ * Display internal information about the module.
+ */
+void vm_page_info_all(void);
+
+/*
+ * Return the total amount of physical memory.
+ */
+phys_addr_t vm_page_mem_size(void);
+
+/*
+ * Return the number of free (unused) pages.
+ *
+ * XXX This currently relies on the kernel being non preemptible and
+ * uniprocessor.
+ */
+unsigned long vm_page_mem_free(void);
+
#endif /* _VM_VM_PAGE_H_ */
diff --git a/vm/vm_pageout.c b/vm/vm_pageout.c
index eb75b975..72f96cbf 100644
--- a/vm/vm_pageout.c
+++ b/vm/vm_pageout.c
@@ -52,7 +52,6 @@
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <machine/locore.h>
-#include <machine/vm_tuning.h>
@@ -83,7 +82,7 @@
* of active+inactive pages that should be inactive.
* The pageout daemon uses it to update vm_page_inactive_target.
*
- * If vm_page_free_count falls below vm_page_free_target and
+ * If the number of free pages falls below vm_page_free_target and
* vm_page_inactive_count is below vm_page_inactive_target,
* then the pageout daemon starts running.
*/
@@ -94,20 +93,20 @@
/*
* Once the pageout daemon starts running, it keeps going
- * until vm_page_free_count meets or exceeds vm_page_free_target.
+ * until the number of free pages meets or exceeds vm_page_free_target.
*/
#ifndef VM_PAGE_FREE_TARGET
-#define VM_PAGE_FREE_TARGET(free) (15 + (free) / 80)
+#define VM_PAGE_FREE_TARGET(free) (150 + (free) * 10 / 100)
#endif /* VM_PAGE_FREE_TARGET */
/*
- * The pageout daemon always starts running once vm_page_free_count
+ * The pageout daemon always starts running once the number of free pages
* falls below vm_page_free_min.
*/
#ifndef VM_PAGE_FREE_MIN
-#define VM_PAGE_FREE_MIN(free) (10 + (free) / 100)
+#define VM_PAGE_FREE_MIN(free) (100 + (free) * 8 / 100)
#endif /* VM_PAGE_FREE_MIN */
/* When vm_page_external_count exceeds vm_page_external_limit,
@@ -126,18 +125,18 @@
#endif /* VM_PAGE_EXTERNAL_TARGET */
/*
- * When vm_page_free_count falls below vm_page_free_reserved,
+ * When the number of free pages falls below vm_page_free_reserved,
* only vm-privileged threads can allocate pages. vm-privilege
* allows the pageout daemon and default pager (and any other
* associated threads needed for default pageout) to continue
* operation by dipping into the reserved pool of pages. */
#ifndef VM_PAGE_FREE_RESERVED
-#define VM_PAGE_FREE_RESERVED 50
+#define VM_PAGE_FREE_RESERVED 500
#endif /* VM_PAGE_FREE_RESERVED */
/*
- * When vm_page_free_count falls below vm_pageout_reserved_internal,
+ * When the number of free pages falls below vm_pageout_reserved_internal,
* the pageout daemon no longer trusts external pagers to clean pages.
* External pagers are probably all wedged waiting for a free page.
* It forcibly double-pages dirty pages belonging to external objects,
@@ -145,11 +144,11 @@
*/
#ifndef VM_PAGEOUT_RESERVED_INTERNAL
-#define VM_PAGEOUT_RESERVED_INTERNAL(reserve) ((reserve) - 25)
+#define VM_PAGEOUT_RESERVED_INTERNAL(reserve) ((reserve) - 250)
#endif /* VM_PAGEOUT_RESERVED_INTERNAL */
/*
- * When vm_page_free_count falls below vm_pageout_reserved_really,
+ * When the number of free pages falls below vm_pageout_reserved_really,
* the pageout daemon stops work entirely to let the default pager
* catch up (assuming the default pager has pages to clean).
* Beyond this point, it is too dangerous to consume memory
@@ -157,12 +156,9 @@
*/
#ifndef VM_PAGEOUT_RESERVED_REALLY
-#define VM_PAGEOUT_RESERVED_REALLY(reserve) ((reserve) - 40)
+#define VM_PAGEOUT_RESERVED_REALLY(reserve) ((reserve) - 400)
#endif /* VM_PAGEOUT_RESERVED_REALLY */
-extern void vm_pageout_continue();
-extern void vm_pageout_scan_continue();
-
unsigned int vm_pageout_reserved_internal = 0;
unsigned int vm_pageout_reserved_really = 0;
@@ -230,16 +226,16 @@ unsigned int vm_pageout_inactive_cleaned_external = 0;
* not busy on exit.
*/
vm_page_t
-vm_pageout_setup(m, paging_offset, new_object, new_offset, flush)
- register vm_page_t m;
- vm_offset_t paging_offset;
- register vm_object_t new_object;
- vm_offset_t new_offset;
- boolean_t flush;
+vm_pageout_setup(
+ vm_page_t m,
+ vm_offset_t paging_offset,
+ vm_object_t new_object,
+ vm_offset_t new_offset,
+ boolean_t flush)
{
- register vm_object_t old_object = m->object;
- register vm_page_t holding_page = 0; /*'=0'to quiet gcc warnings*/
- register vm_page_t new_m;
+ vm_object_t old_object = m->object;
+ vm_page_t holding_page = 0; /*'=0'to quiet gcc warnings*/
+ vm_page_t new_m;
assert(m->busy && !m->absent && !m->fictitious);
@@ -417,15 +413,15 @@ vm_pageout_setup(m, paging_offset, new_object, new_offset, flush)
* copy to a new page in a new object, if not.
*/
void
-vm_pageout_page(m, initial, flush)
- register vm_page_t m;
- boolean_t initial;
- boolean_t flush;
+vm_pageout_page(
+ vm_page_t m,
+ boolean_t initial,
+ boolean_t flush)
{
vm_map_copy_t copy;
- register vm_object_t old_object;
- register vm_object_t new_object;
- register vm_page_t holding_page;
+ vm_object_t old_object;
+ vm_object_t new_object;
+ vm_page_t holding_page;
vm_offset_t paging_offset;
kern_return_t rc;
boolean_t precious_clean;
@@ -511,7 +507,7 @@ vm_pageout_page(m, initial, flush)
* vm_page_free_wanted == 0.
*/
-void vm_pageout_scan()
+void vm_pageout_scan(void)
{
unsigned int burst_count;
unsigned int want_pages;
@@ -555,13 +551,15 @@ void vm_pageout_scan()
stack_collect();
net_kmsg_collect();
consider_task_collect();
+ if (0) /* XXX: pcb_collect doesn't do anything yet, so it is
+ pointless to call consider_thread_collect. */
consider_thread_collect();
slab_collect();
for (burst_count = 0;;) {
- register vm_page_t m;
- register vm_object_t object;
- unsigned int free_count;
+ vm_page_t m;
+ vm_object_t object;
+ unsigned long free_count;
/*
* Recalculate vm_page_inactivate_target.
@@ -578,7 +576,7 @@ void vm_pageout_scan()
while ((vm_page_inactive_count < vm_page_inactive_target) &&
!queue_empty(&vm_page_queue_active)) {
- register vm_object_t obj;
+ vm_object_t obj;
vm_pageout_active++;
m = (vm_page_t) queue_first(&vm_page_queue_active);
@@ -632,7 +630,7 @@ void vm_pageout_scan()
*/
simple_lock(&vm_page_queue_free_lock);
- free_count = vm_page_free_count;
+ free_count = vm_page_mem_free();
if ((free_count >= vm_page_free_target) &&
(vm_page_external_count <= vm_page_external_target) &&
(vm_page_free_wanted == 0)) {
@@ -695,7 +693,7 @@ void vm_pageout_scan()
if (want_pages || m->external)
break;
- m = (vm_page_t) queue_next (m);
+ m = (vm_page_t) queue_next (&m->pageq);
if (!m)
goto pause;
}
@@ -862,7 +860,7 @@ void vm_pageout_scan()
}
}
-void vm_pageout_scan_continue()
+void vm_pageout_scan_continue(void)
{
/*
* We just paused to let the pagers catch up.
@@ -893,7 +891,7 @@ void vm_pageout_scan_continue()
* vm_pageout is the high level pageout daemon.
*/
-void vm_pageout_continue()
+void vm_pageout_continue(void)
{
/*
* The pageout daemon is never done, so loop forever.
@@ -915,12 +913,13 @@ void vm_pageout_continue()
}
}
-void vm_pageout()
+void vm_pageout(void)
{
- int free_after_reserve;
+ unsigned long free_after_reserve;
current_thread()->vm_privilege = TRUE;
stack_privilege(current_thread());
+ thread_set_own_priority(0);
/*
* Initialize some paging parameters.
@@ -952,7 +951,7 @@ void vm_pageout()
vm_pageout_reserved_really =
VM_PAGEOUT_RESERVED_REALLY(vm_page_free_reserved);
- free_after_reserve = vm_page_free_count - vm_page_free_reserved;
+ free_after_reserve = vm_page_mem_free() - vm_page_free_reserved;
if (vm_page_external_limit == 0)
vm_page_external_limit =
diff --git a/vm/vm_pageout.h b/vm/vm_pageout.h
index d41ee30a..ea6cfaf4 100644
--- a/vm/vm_pageout.h
+++ b/vm/vm_pageout.h
@@ -44,4 +44,10 @@ extern vm_page_t vm_pageout_setup(vm_page_t, vm_offset_t, vm_object_t,
vm_offset_t, boolean_t);
extern void vm_pageout_page(vm_page_t, boolean_t, boolean_t);
+extern void vm_pageout(void) __attribute__((noreturn));
+
+extern void vm_pageout_continue(void) __attribute__((noreturn));
+
+extern void vm_pageout_scan_continue(void) __attribute__((noreturn));
+
#endif /* _VM_VM_PAGEOUT_H_ */
diff --git a/vm/vm_print.h b/vm/vm_print.h
index 69a20ba3..8a36d75e 100644
--- a/vm/vm_print.h
+++ b/vm/vm_print.h
@@ -1,3 +1,21 @@
+/*
+ * Copyright (c) 2013 Free Software Foundation.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
#ifndef VM_PRINT_H
#define VM_PRINT_H
@@ -5,10 +23,11 @@
#include <machine/db_machdep.h>
/* Debugging: print a map */
-extern void vm_map_print(vm_map_t);
+extern void vm_map_print(db_expr_t addr, boolean_t have_addr,
+ db_expr_t count, const char *modif);
/* Pretty-print a copy object for ddb. */
-extern void vm_map_copy_print(vm_map_copy_t);
+extern void vm_map_copy_print(const vm_map_copy_t);
#include <vm/vm_object.h>
@@ -16,7 +35,7 @@ extern void vm_object_print(vm_object_t);
#include <vm/vm_page.h>
-extern void vm_page_print(vm_page_t);
+extern void vm_page_print(const vm_page_t);
#endif /* VM_PRINT_H */
diff --git a/vm/vm_resident.c b/vm/vm_resident.c
index 66ab51f0..fa7a337b 100644
--- a/vm/vm_resident.c
+++ b/vm/vm_resident.c
@@ -27,7 +27,7 @@
* the rights to redistribute these changes.
*/
/*
- * File: vm/vm_page.c
+ * File: vm/vm_resident.c
* Author: Avadis Tevanian, Jr., Michael Wayne Young
*
* Resident memory management module.
@@ -65,14 +65,14 @@
/*
- * Associated with eacn page of user-allocatable memory is a
+ * Associated with each page of user-allocatable memory is a
* page structure.
*/
/*
* These variables record the values returned by vm_page_bootstrap,
* for debugging purposes. The implementation of pmap_steal_memory
- * and pmap_startup here also uses them internally.
+ * here also uses them internally.
*/
vm_offset_t virtual_space_start;
@@ -95,29 +95,18 @@ vm_page_bucket_t *vm_page_buckets; /* Array of buckets */
unsigned int vm_page_bucket_count = 0; /* How big is array? */
unsigned int vm_page_hash_mask; /* Mask for hash function */
-/*
- * Resident page structures are initialized from
- * a template (see vm_page_alloc).
- *
- * When adding a new field to the virtual memory
- * object structure, be sure to add initialization
- * (see vm_page_bootstrap).
- */
-struct vm_page vm_page_template;
-
-/*
- * Resident pages that represent real memory
- * are allocated from a free list.
- */
-vm_page_t vm_page_queue_free;
vm_page_t vm_page_queue_fictitious;
decl_simple_lock_data(,vm_page_queue_free_lock)
unsigned int vm_page_free_wanted;
-int vm_page_free_count;
int vm_page_fictitious_count;
int vm_page_external_count;
-unsigned int vm_page_free_count_minimum; /* debugging */
+/*
+ * This variable isn't directly used. It's merely a placeholder for the
+ * address used to synchronize threads waiting for pages to become
+ * available. The real value is returned by vm_page_free_mem().
+ */
+unsigned int vm_page_free_avail;
/*
* Occasionally, the virtual memory system uses
@@ -192,48 +181,15 @@ void vm_page_bootstrap(
vm_offset_t *startp,
vm_offset_t *endp)
{
- register vm_page_t m;
int i;
/*
- * Initialize the vm_page template.
- */
-
- m = &vm_page_template;
- m->object = VM_OBJECT_NULL; /* reset later */
- m->offset = 0; /* reset later */
- m->wire_count = 0;
-
- m->inactive = FALSE;
- m->active = FALSE;
- m->laundry = FALSE;
- m->free = FALSE;
- m->external = FALSE;
-
- m->busy = TRUE;
- m->wanted = FALSE;
- m->tabled = FALSE;
- m->fictitious = FALSE;
- m->private = FALSE;
- m->absent = FALSE;
- m->error = FALSE;
- m->dirty = FALSE;
- m->precious = FALSE;
- m->reference = FALSE;
-
- m->phys_addr = 0; /* reset later */
-
- m->page_lock = VM_PROT_NONE;
- m->unlock_request = VM_PROT_NONE;
-
- /*
* Initialize the page queues.
*/
simple_lock_init(&vm_page_queue_free_lock);
simple_lock_init(&vm_page_queue_lock);
- vm_page_queue_free = VM_PAGE_NULL;
vm_page_queue_fictitious = VM_PAGE_NULL;
queue_init(&vm_page_queue_active);
queue_init(&vm_page_queue_inactive);
@@ -241,12 +197,6 @@ void vm_page_bootstrap(
vm_page_free_wanted = 0;
/*
- * Steal memory for the kernel map entries.
- */
-
- kentry_data = pmap_steal_memory(kentry_data_size);
-
- /*
* Allocate (and initialize) the virtual-to-physical
* table hash buckets.
*
@@ -274,35 +224,25 @@ void vm_page_bootstrap(
sizeof(vm_page_bucket_t));
for (i = 0; i < vm_page_bucket_count; i++) {
- register vm_page_bucket_t *bucket = &vm_page_buckets[i];
+ vm_page_bucket_t *bucket = &vm_page_buckets[i];
bucket->pages = VM_PAGE_NULL;
simple_lock_init(&bucket->lock);
}
- /*
- * Machine-dependent code allocates the resident page table.
- * It uses vm_page_init to initialize the page frames.
- * The code also returns to us the virtual space available
- * to the kernel. We don't trust the pmap module
- * to get the alignment right.
- */
+ vm_page_setup();
- pmap_startup(&virtual_space_start, &virtual_space_end);
virtual_space_start = round_page(virtual_space_start);
virtual_space_end = trunc_page(virtual_space_end);
*startp = virtual_space_start;
*endp = virtual_space_end;
-
- /* printf("vm_page_bootstrap: %d free pages\n", vm_page_free_count);*/
- vm_page_free_count_minimum = vm_page_free_count;
}
#ifndef MACHINE_PAGES
/*
- * We implement pmap_steal_memory and pmap_startup with the help
- * of two simpler functions, pmap_virtual_space and pmap_next_page.
+ * We implement pmap_steal_memory with the help
+ * of two simpler functions, pmap_virtual_space and vm_page_bootalloc.
*/
vm_offset_t pmap_steal_memory(
@@ -310,11 +250,7 @@ vm_offset_t pmap_steal_memory(
{
vm_offset_t addr, vaddr, paddr;
- /*
- * We round the size to an integer multiple.
- */
-
- size = (size + 3) &~ 3;
+ size = round_page(size);
/*
* If this is the first call to pmap_steal_memory,
@@ -347,8 +283,7 @@ vm_offset_t pmap_steal_memory(
for (vaddr = round_page(addr);
vaddr < addr + size;
vaddr += PAGE_SIZE) {
- if (!pmap_next_page(&paddr))
- panic("pmap_steal_memory");
+ paddr = vm_page_bootalloc(PAGE_SIZE);
/*
* XXX Logically, these mappings should be wired,
@@ -361,64 +296,6 @@ vm_offset_t pmap_steal_memory(
return addr;
}
-
-void pmap_startup(
- vm_offset_t *startp,
- vm_offset_t *endp)
-{
- unsigned int i, npages, pages_initialized;
- vm_page_t pages;
- vm_offset_t paddr;
-
- /*
- * We calculate how many page frames we will have
- * and then allocate the page structures in one chunk.
- */
-
- npages = ((PAGE_SIZE * pmap_free_pages() +
- (round_page(virtual_space_start) - virtual_space_start)) /
- (PAGE_SIZE + sizeof *pages));
-
- pages = (vm_page_t) pmap_steal_memory(npages * sizeof *pages);
-
- /*
- * Initialize the page frames.
- */
-
- for (i = 0, pages_initialized = 0; i < npages; i++) {
- if (!pmap_next_page(&paddr))
- break;
-
- vm_page_init(&pages[i], paddr);
- pages_initialized++;
- }
- i = 0;
- while (pmap_next_page(&paddr))
- i++;
- if (i)
- printf("%u memory page(s) left away\n", i);
-
- /*
- * Release pages in reverse order so that physical pages
- * initially get allocated in ascending addresses. This keeps
- * the devices (which must address physical memory) happy if
- * they require several consecutive pages.
- */
-
- for (i = pages_initialized; i > 0; i--) {
- vm_page_release(&pages[i - 1], FALSE);
- }
-
- /*
- * We have to re-align virtual_space_start,
- * because pmap_steal_memory has been using it.
- */
-
- virtual_space_start = round_page(virtual_space_start);
-
- *startp = virtual_space_start;
- *endp = virtual_space_end;
-}
#endif /* MACHINE_PAGES */
/*
@@ -430,35 +307,7 @@ void pmap_startup(
void vm_page_module_init(void)
{
kmem_cache_init(&vm_page_cache, "vm_page", sizeof(struct vm_page), 0,
- NULL, NULL, NULL, 0);
-}
-
-/*
- * Routine: vm_page_create
- * Purpose:
- * After the VM system is up, machine-dependent code
- * may stumble across more physical memory. For example,
- * memory that it was reserving for a frame buffer.
- * vm_page_create turns this memory into available pages.
- */
-
-void vm_page_create(
- vm_offset_t start,
- vm_offset_t end)
-{
- vm_offset_t paddr;
- vm_page_t m;
-
- for (paddr = round_page(start);
- paddr < trunc_page(end);
- paddr += PAGE_SIZE) {
- m = (vm_page_t) kmem_cache_alloc(&vm_page_cache);
- if (m == VM_PAGE_NULL)
- panic("vm_page_create");
-
- vm_page_init(m, paddr);
- vm_page_release(m, FALSE);
- }
+ NULL, 0);
}
/*
@@ -483,11 +332,11 @@ void vm_page_create(
*/
void vm_page_insert(
- register vm_page_t mem,
- register vm_object_t object,
- register vm_offset_t offset)
+ vm_page_t mem,
+ vm_object_t object,
+ vm_offset_t offset)
{
- register vm_page_bucket_t *bucket;
+ vm_page_bucket_t *bucket;
VM_PAGE_CHECK(mem);
@@ -555,11 +404,11 @@ void vm_page_insert(
*/
void vm_page_replace(
- register vm_page_t mem,
- register vm_object_t object,
- register vm_offset_t offset)
+ vm_page_t mem,
+ vm_object_t object,
+ vm_offset_t offset)
{
- register vm_page_bucket_t *bucket;
+ vm_page_bucket_t *bucket;
VM_PAGE_CHECK(mem);
@@ -582,7 +431,7 @@ void vm_page_replace(
simple_lock(&bucket->lock);
if (bucket->pages) {
vm_page_t *mp = &bucket->pages;
- register vm_page_t m = *mp;
+ vm_page_t m = *mp;
do {
if (m->object == object && m->offset == offset) {
/*
@@ -646,10 +495,10 @@ void vm_page_replace(
*/
void vm_page_remove(
- register vm_page_t mem)
+ vm_page_t mem)
{
- register vm_page_bucket_t *bucket;
- register vm_page_t this;
+ vm_page_bucket_t *bucket;
+ vm_page_t this;
assert(mem->tabled);
VM_PAGE_CHECK(mem);
@@ -665,7 +514,7 @@ void vm_page_remove(
bucket->pages = mem->next;
} else {
- register vm_page_t *prev;
+ vm_page_t *prev;
for (prev = &this->next;
(this = *prev) != mem;
@@ -704,11 +553,11 @@ void vm_page_remove(
*/
vm_page_t vm_page_lookup(
- register vm_object_t object,
- register vm_offset_t offset)
+ vm_object_t object,
+ vm_offset_t offset)
{
- register vm_page_t mem;
- register vm_page_bucket_t *bucket;
+ vm_page_t mem;
+ vm_page_bucket_t *bucket;
/*
* Search the hash table for this object/offset pair
@@ -735,9 +584,9 @@ vm_page_t vm_page_lookup(
* The object must be locked.
*/
void vm_page_rename(
- register vm_page_t mem,
- register vm_object_t new_object,
- vm_offset_t new_offset)
+ vm_page_t mem,
+ vm_object_t new_object,
+ vm_offset_t new_offset)
{
/*
* Changes to mem->object require the page lock because
@@ -750,6 +599,33 @@ void vm_page_rename(
vm_page_unlock_queues();
}
+static void vm_page_init_template(vm_page_t m)
+{
+ m->object = VM_OBJECT_NULL; /* reset later */
+ m->offset = 0; /* reset later */
+ m->wire_count = 0;
+
+ m->inactive = FALSE;
+ m->active = FALSE;
+ m->laundry = FALSE;
+ m->free = FALSE;
+ m->external = FALSE;
+
+ m->busy = TRUE;
+ m->wanted = FALSE;
+ m->tabled = FALSE;
+ m->fictitious = FALSE;
+ m->private = FALSE;
+ m->absent = FALSE;
+ m->error = FALSE;
+ m->dirty = FALSE;
+ m->precious = FALSE;
+ m->reference = FALSE;
+
+ m->page_lock = VM_PROT_NONE;
+ m->unlock_request = VM_PROT_NONE;
+}
+
/*
* vm_page_init:
*
@@ -758,11 +634,9 @@ void vm_page_rename(
* so that it can be given to vm_page_release or vm_page_insert.
*/
void vm_page_init(
- vm_page_t mem,
- vm_offset_t phys_addr)
+ vm_page_t mem)
{
- *mem = vm_page_template;
- mem->phys_addr = phys_addr;
+ vm_page_init_template(mem);
}
/*
@@ -774,7 +648,7 @@ void vm_page_init(
vm_page_t vm_page_grab_fictitious(void)
{
- register vm_page_t m;
+ vm_page_t m;
simple_lock(&vm_page_queue_free_lock);
m = vm_page_queue_fictitious;
@@ -794,8 +668,8 @@ vm_page_t vm_page_grab_fictitious(void)
* Release a fictitious page to the free list.
*/
-void vm_page_release_fictitious(
- register vm_page_t m)
+static void vm_page_release_fictitious(
+ vm_page_t m)
{
simple_lock(&vm_page_queue_free_lock);
if (m->free)
@@ -818,7 +692,7 @@ int vm_page_fictitious_quantum = 5;
void vm_page_more_fictitious(void)
{
- register vm_page_t m;
+ vm_page_t m;
int i;
for (i = 0; i < vm_page_fictitious_quantum; i++) {
@@ -826,7 +700,8 @@ void vm_page_more_fictitious(void)
if (m == VM_PAGE_NULL)
panic("vm_page_more_fictitious");
- vm_page_init(m, vm_page_fictitious_addr);
+ vm_page_init(m);
+ m->phys_addr = vm_page_fictitious_addr;
m->fictitious = TRUE;
vm_page_release_fictitious(m);
}
@@ -836,25 +711,46 @@ void vm_page_more_fictitious(void)
* vm_page_convert:
*
* Attempt to convert a fictitious page into a real page.
+ *
+ * The object referenced by *MP must be locked.
*/
boolean_t vm_page_convert(
- register vm_page_t m,
+ struct vm_page **mp,
boolean_t external)
{
- register vm_page_t real_m;
+ struct vm_page *real_m, *fict_m;
+ vm_object_t object;
+ vm_offset_t offset;
+
+ fict_m = *mp;
+
+ assert(fict_m->fictitious);
+ assert(fict_m->phys_addr == vm_page_fictitious_addr);
+ assert(!fict_m->active);
+ assert(!fict_m->inactive);
real_m = vm_page_grab(external);
if (real_m == VM_PAGE_NULL)
return FALSE;
- m->phys_addr = real_m->phys_addr;
- m->fictitious = FALSE;
+ object = fict_m->object;
+ offset = fict_m->offset;
+ vm_page_remove(fict_m);
- real_m->phys_addr = vm_page_fictitious_addr;
- real_m->fictitious = TRUE;
+ memcpy(&real_m->vm_page_header,
+ &fict_m->vm_page_header,
+ sizeof(*fict_m) - VM_PAGE_HEADER_SIZE);
+ real_m->fictitious = FALSE;
- vm_page_release_fictitious(real_m);
+ vm_page_insert(real_m, object, offset);
+
+ assert(real_m->phys_addr != vm_page_fictitious_addr);
+ assert(fict_m->fictitious);
+ assert(fict_m->phys_addr == vm_page_fictitious_addr);
+
+ vm_page_release_fictitious(fict_m);
+ *mp = real_m;
return TRUE;
}
@@ -868,7 +764,7 @@ boolean_t vm_page_convert(
vm_page_t vm_page_grab(
boolean_t external)
{
- register vm_page_t mem;
+ vm_page_t mem;
simple_lock(&vm_page_queue_free_lock);
@@ -878,7 +774,7 @@ vm_page_t vm_page_grab(
* for externally-managed pages.
*/
- if (((vm_page_free_count < vm_page_free_reserved)
+ if (((vm_page_mem_free() < vm_page_free_reserved)
|| (external
&& (vm_page_external_count > vm_page_external_limit)))
&& !current_thread()->vm_privilege) {
@@ -886,15 +782,16 @@ vm_page_t vm_page_grab(
return VM_PAGE_NULL;
}
- if (vm_page_queue_free == VM_PAGE_NULL)
- panic("vm_page_grab");
+ mem = vm_page_alloc_pa(0, VM_PAGE_SEL_DIRECTMAP, VM_PT_KERNEL);
+
+ if (mem == NULL) {
+ simple_unlock(&vm_page_queue_free_lock);
+ return NULL;
+ }
- if (--vm_page_free_count < vm_page_free_count_minimum)
- vm_page_free_count_minimum = vm_page_free_count;
if (external)
vm_page_external_count++;
- mem = vm_page_queue_free;
- vm_page_queue_free = (vm_page_t) mem->pageq.next;
+
mem->free = FALSE;
mem->extcounted = mem->external = external;
simple_unlock(&vm_page_queue_free_lock);
@@ -910,15 +807,15 @@ vm_page_t vm_page_grab(
* it doesn't really matter.
*/
- if ((vm_page_free_count < vm_page_free_min) ||
- ((vm_page_free_count < vm_page_free_target) &&
+ if ((vm_page_mem_free() < vm_page_free_min) ||
+ ((vm_page_mem_free() < vm_page_free_target) &&
(vm_page_inactive_count < vm_page_inactive_target)))
thread_wakeup((event_t) &vm_page_free_wanted);
return mem;
}
-vm_offset_t vm_page_grab_phys_addr()
+vm_offset_t vm_page_grab_phys_addr(void)
{
vm_page_t p = vm_page_grab(FALSE);
if (p == VM_PAGE_NULL)
@@ -928,208 +825,92 @@ vm_offset_t vm_page_grab_phys_addr()
}
/*
- * vm_page_grab_contiguous_pages:
- *
- * Take N pages off the free list, the pages should
- * cover a contiguous range of physical addresses.
- * [Used by device drivers to cope with DMA limitations]
+ * vm_page_release:
*
- * Returns the page descriptors in ascending order, or
- * Returns KERN_RESOURCE_SHORTAGE if it could not.
+ * Return a page to the free list.
*/
-/* Biggest phys page number for the pages we handle in VM */
-
-vm_size_t vm_page_big_pagenum = 0; /* Set this before call! */
-
-kern_return_t
-vm_page_grab_contiguous_pages(
- int npages,
- vm_page_t pages[],
- natural_t *bits,
- boolean_t external)
+static void vm_page_release(
+ vm_page_t mem,
+ boolean_t external)
{
- register int first_set;
- int size, alloc_size;
- kern_return_t ret;
- vm_page_t mem, *prevmemp;
-
-#ifndef NBBY
-#define NBBY 8 /* size in bits of sizeof()`s unity */
-#endif
-
-#define NBPEL (sizeof(natural_t)*NBBY)
-
- size = (vm_page_big_pagenum + NBPEL - 1)
- & ~(NBPEL - 1); /* in bits */
-
- size = size / NBBY; /* in bytes */
-
- /*
- * If we are called before the VM system is fully functional
- * the invoker must provide us with the work space. [one bit
- * per page starting at phys 0 and up to vm_page_big_pagenum]
- */
- if (bits == 0) {
- alloc_size = round_page(size);
- if (kmem_alloc_wired(kernel_map,
- (vm_offset_t *)&bits,
- alloc_size)
- != KERN_SUCCESS)
- return KERN_RESOURCE_SHORTAGE;
- } else
- alloc_size = 0;
-
- memset(bits, 0, size);
-
- /*
- * A very large granularity call, its rare so that is ok
- */
simple_lock(&vm_page_queue_free_lock);
+ if (mem->free)
+ panic("vm_page_release");
+ mem->free = TRUE;
+ vm_page_free_pa(mem, 0);
+ if (external)
+ vm_page_external_count--;
/*
- * Do not dip into the reserved pool.
- */
-
- if ((vm_page_free_count < vm_page_free_reserved)
- || (vm_page_external_count >= vm_page_external_limit)) {
- printf_once("no more room for vm_page_grab_contiguous_pages");
- simple_unlock(&vm_page_queue_free_lock);
- return KERN_RESOURCE_SHORTAGE;
- }
-
- /*
- * First pass through, build a big bit-array of
- * the pages that are free. It is not going to
- * be too large anyways, in 4k we can fit info
- * for 32k pages.
+ * Check if we should wake up someone waiting for page.
+ * But don't bother waking them unless they can allocate.
+ *
+ * We wakeup only one thread, to prevent starvation.
+ * Because the scheduling system handles wait queues FIFO,
+ * if we wakeup all waiting threads, one greedy thread
+ * can starve multiple niceguy threads. When the threads
+ * all wakeup, the greedy thread runs first, grabs the page,
+ * and waits for another page. It will be the first to run
+ * when the next page is freed.
+ *
+ * However, there is a slight danger here.
+ * The thread we wake might not use the free page.
+ * Then the other threads could wait indefinitely
+ * while the page goes unused. To forestall this,
+ * the pageout daemon will keep making free pages
+ * as long as vm_page_free_wanted is non-zero.
*/
- mem = vm_page_queue_free;
- while (mem) {
- register int word_index, bit_index;
-
- bit_index = (mem->phys_addr >> PAGE_SHIFT);
- word_index = bit_index / NBPEL;
- bit_index = bit_index - (word_index * NBPEL);
- bits[word_index] |= 1 << bit_index;
- mem = (vm_page_t) mem->pageq.next;
+ if ((vm_page_free_wanted > 0) &&
+ (vm_page_mem_free() >= vm_page_free_reserved)) {
+ vm_page_free_wanted--;
+ thread_wakeup_one((event_t) &vm_page_free_avail);
}
- /*
- * Second loop. Scan the bit array for NPAGES
- * contiguous bits. That gives us, if any,
- * the range of pages we will be grabbing off
- * the free list.
- */
- {
- register int bits_so_far = 0, i;
+ simple_unlock(&vm_page_queue_free_lock);
+}
- first_set = 0;
+/*
+ * vm_page_grab_contig:
+ *
+ * Remove a block of contiguous pages from the free list.
+ * Returns VM_PAGE_NULL if the request fails.
+ */
- for (i = 0; i < size; i += sizeof(natural_t)) {
+vm_page_t vm_page_grab_contig(
+ vm_size_t size,
+ unsigned int selector)
+{
+ unsigned int i, order, nr_pages;
+ vm_page_t mem;
- register natural_t v = bits[i / sizeof(natural_t)];
- register int bitpos;
+ order = vm_page_order(size);
+ nr_pages = 1 << order;
- /*
- * Bitscan this one word
- */
- if (v) {
- /*
- * keep counting them beans ?
- */
- bitpos = 0;
+ simple_lock(&vm_page_queue_free_lock);
- if (bits_so_far) {
-count_ones:
- while (v & 1) {
- bitpos++;
- /*
- * got enough beans ?
- */
- if (++bits_so_far == npages)
- goto found_em;
- v >>= 1;
- }
- /* if we are being lucky, roll again */
- if (bitpos == NBPEL)
- continue;
- }
+ /*
+ * Only let privileged threads (involved in pageout)
+ * dip into the reserved pool or exceed the limit
+ * for externally-managed pages.
+ */
- /*
- * search for beans here
- */
- bits_so_far = 0;
- while ((bitpos < NBPEL) && ((v & 1) == 0)) {
- bitpos++;
- v >>= 1;
- }
- if (v & 1) {
- first_set = (i * NBBY) + bitpos;
- goto count_ones;
- }
- }
- /*
- * No luck
- */
- bits_so_far = 0;
- }
+ if (((vm_page_mem_free() - nr_pages) <= vm_page_free_reserved)
+ && !current_thread()->vm_privilege) {
+ simple_unlock(&vm_page_queue_free_lock);
+ return VM_PAGE_NULL;
}
- /*
- * We could not find enough contiguous pages.
- */
- simple_unlock(&vm_page_queue_free_lock);
+ /* TODO Allow caller to pass type */
+ mem = vm_page_alloc_pa(order, selector, VM_PT_KERNEL);
- printf_once("no contiguous room for vm_page_grab_contiguous_pages");
- ret = KERN_RESOURCE_SHORTAGE;
- goto out;
+ if (mem == NULL)
+ panic("vm_page_grab_contig");
- /*
- * Final pass. Now we know which pages we want.
- * Scan the list until we find them all, grab
- * pages as we go. FIRST_SET tells us where
- * in the bit-array our pages start.
- */
-found_em:
- vm_page_free_count -= npages;
- if (vm_page_free_count < vm_page_free_count_minimum)
- vm_page_free_count_minimum = vm_page_free_count;
- if (external)
- vm_page_external_count += npages;
- {
- register vm_offset_t first_phys, last_phys;
-
- /* cache values for compare */
- first_phys = first_set << PAGE_SHIFT;
- last_phys = first_phys + (npages << PAGE_SHIFT);/* not included */
-
- /* running pointers */
- mem = vm_page_queue_free;
- prevmemp = &vm_page_queue_free;
-
- while (mem) {
-
- register vm_offset_t addr;
-
- addr = mem->phys_addr;
-
- if ((addr >= first_phys) &&
- (addr < last_phys)) {
- *prevmemp = (vm_page_t) mem->pageq.next;
- pages[(addr - first_phys) >> PAGE_SHIFT] = mem;
- mem->free = FALSE;
- mem->extcounted = mem->external = external;
- /*
- * Got them all ?
- */
- if (--npages == 0) break;
- } else
- prevmemp = (vm_page_t *) &mem->pageq.next;
-
- mem = (vm_page_t) mem->pageq.next;
- }
+ for (i = 0; i < nr_pages; i++) {
+ mem[i].free = FALSE;
+ mem[i].extcounted = mem[i].external = 0;
}
simple_unlock(&vm_page_queue_free_lock);
@@ -1145,63 +926,42 @@ found_em:
* it doesn't really matter.
*/
- if ((vm_page_free_count < vm_page_free_min) ||
- ((vm_page_free_count < vm_page_free_target) &&
+ if ((vm_page_mem_free() < vm_page_free_min) ||
+ ((vm_page_mem_free() < vm_page_free_target) &&
(vm_page_inactive_count < vm_page_inactive_target)))
- thread_wakeup(&vm_page_free_wanted);
-
- ret = KERN_SUCCESS;
-out:
- if (alloc_size)
- kmem_free(kernel_map, (vm_offset_t) bits, alloc_size);
+ thread_wakeup((event_t) &vm_page_free_wanted);
- return ret;
+ return mem;
}
/*
- * vm_page_release:
+ * vm_page_free_contig:
*
- * Return a page to the free list.
+ * Return a block of contiguous pages to the free list.
*/
-void vm_page_release(
- register vm_page_t mem,
- boolean_t external)
+void vm_page_free_contig(vm_page_t mem, vm_size_t size)
{
+ unsigned int i, order, nr_pages;
+
+ order = vm_page_order(size);
+ nr_pages = 1 << order;
+
simple_lock(&vm_page_queue_free_lock);
- if (mem->free)
- panic("vm_page_release");
- mem->free = TRUE;
- mem->pageq.next = (queue_entry_t) vm_page_queue_free;
- vm_page_queue_free = mem;
- vm_page_free_count++;
- if (external)
- vm_page_external_count--;
- /*
- * Check if we should wake up someone waiting for page.
- * But don't bother waking them unless they can allocate.
- *
- * We wakeup only one thread, to prevent starvation.
- * Because the scheduling system handles wait queues FIFO,
- * if we wakeup all waiting threads, one greedy thread
- * can starve multiple niceguy threads. When the threads
- * all wakeup, the greedy threads runs first, grabs the page,
- * and waits for another page. It will be the first to run
- * when the next page is freed.
- *
- * However, there is a slight danger here.
- * The thread we wake might not use the free page.
- * Then the other threads could wait indefinitely
- * while the page goes unused. To forestall this,
- * the pageout daemon will keep making free pages
- * as long as vm_page_free_wanted is non-zero.
- */
+ for (i = 0; i < nr_pages; i++) {
+ if (mem[i].free)
+ panic("vm_page_free_contig");
+
+ mem[i].free = TRUE;
+ }
+
+ vm_page_free_pa(mem, order);
if ((vm_page_free_wanted > 0) &&
- (vm_page_free_count >= vm_page_free_reserved)) {
+ (vm_page_mem_free() >= vm_page_free_reserved)) {
vm_page_free_wanted--;
- thread_wakeup_one((event_t) &vm_page_free_count);
+ thread_wakeup_one((event_t) &vm_page_free_avail);
}
simple_unlock(&vm_page_queue_free_lock);
@@ -1227,11 +987,11 @@ void vm_page_wait(
*/
simple_lock(&vm_page_queue_free_lock);
- if ((vm_page_free_count < vm_page_free_target)
+ if ((vm_page_mem_free() < vm_page_free_target)
|| (vm_page_external_count > vm_page_external_limit)) {
if (vm_page_free_wanted++ == 0)
thread_wakeup((event_t)&vm_page_free_wanted);
- assert_wait((event_t)&vm_page_free_count, FALSE);
+ assert_wait((event_t)&vm_page_free_avail, FALSE);
simple_unlock(&vm_page_queue_free_lock);
if (continuation != 0) {
counter(c_vm_page_wait_block_user++);
@@ -1257,7 +1017,7 @@ vm_page_t vm_page_alloc(
vm_object_t object,
vm_offset_t offset)
{
- register vm_page_t mem;
+ vm_page_t mem;
mem = vm_page_grab(!object->internal);
if (mem == VM_PAGE_NULL)
@@ -1279,7 +1039,7 @@ vm_page_t vm_page_alloc(
* Object and page queues must be locked prior to entry.
*/
void vm_page_free(
- register vm_page_t mem)
+ vm_page_t mem)
{
if (mem->free)
panic("vm_page_free");
@@ -1310,12 +1070,13 @@ void vm_page_free(
*/
if (mem->private || mem->fictitious) {
- vm_page_init(mem, vm_page_fictitious_addr);
+ vm_page_init(mem);
+ mem->phys_addr = vm_page_fictitious_addr;
mem->fictitious = TRUE;
vm_page_release_fictitious(mem);
} else {
int external = mem->external && mem->extcounted;
- vm_page_init(mem, mem->phys_addr);
+ vm_page_init(mem);
vm_page_release(mem, external);
}
}
@@ -1330,7 +1091,7 @@ void vm_page_free(
* The page's object and the page queues must be locked.
*/
void vm_page_wire(
- register vm_page_t mem)
+ vm_page_t mem)
{
VM_PAGE_CHECK(mem);
@@ -1351,7 +1112,7 @@ void vm_page_wire(
* The page's object and the page queues must be locked.
*/
void vm_page_unwire(
- register vm_page_t mem)
+ vm_page_t mem)
{
VM_PAGE_CHECK(mem);
@@ -1374,7 +1135,7 @@ void vm_page_unwire(
* The page queues must be locked.
*/
void vm_page_deactivate(
- register vm_page_t m)
+ vm_page_t m)
{
VM_PAGE_CHECK(m);
@@ -1408,7 +1169,7 @@ void vm_page_deactivate(
*/
void vm_page_activate(
- register vm_page_t m)
+ vm_page_t m)
{
VM_PAGE_CHECK(m);
@@ -1505,10 +1266,10 @@ vm_page_info(
* Routine: vm_page_print [exported]
*/
void vm_page_print(p)
- vm_page_t p;
+ const vm_page_t p;
{
iprintf("Page 0x%X: object 0x%X,", (vm_offset_t) p, (vm_offset_t) p->object);
- printf(" offset 0x%X", (vm_offset_t) p->offset);
+ printf(" offset 0x%X", p->offset);
printf("wire_count %d,", p->wire_count);
printf(" %s",
(p->active ? "active" : (p->inactive ? "inactive" : "loose")));
@@ -1533,7 +1294,7 @@ void vm_page_print(p)
printf("%s,",
(p->tabled ? "" : "not_tabled"));
printf("phys_addr = 0x%X, lock = 0x%X, unlock_request = 0x%X\n",
- (vm_offset_t) p->phys_addr,
+ p->phys_addr,
(vm_offset_t) p->page_lock,
(vm_offset_t) p->unlock_request);
}
diff --git a/vm/vm_resident.h b/vm/vm_resident.h
index 67f1807f..e8bf6818 100644
--- a/vm/vm_resident.h
+++ b/vm/vm_resident.h
@@ -38,8 +38,8 @@
* The object and page must be locked.
*/
extern void vm_page_replace (
- register vm_page_t mem,
- register vm_object_t object,
- register vm_offset_t offset);
+ vm_page_t mem,
+ vm_object_t object,
+ vm_offset_t offset);
#endif /* _VM_RESIDENT_H_ */
diff --git a/vm/vm_user.c b/vm/vm_user.c
index 6fe398e0..e65f6d5f 100644
--- a/vm/vm_user.c
+++ b/vm/vm_user.c
@@ -56,11 +56,11 @@ vm_statistics_data_t vm_stat;
* vm_allocate allocates "zero fill" memory in the specfied
* map.
*/
-kern_return_t vm_allocate(map, addr, size, anywhere)
- register vm_map_t map;
- register vm_offset_t *addr;
- register vm_size_t size;
- boolean_t anywhere;
+kern_return_t vm_allocate(
+ vm_map_t map,
+ vm_offset_t *addr,
+ vm_size_t size,
+ boolean_t anywhere)
{
kern_return_t result;
@@ -97,10 +97,10 @@ kern_return_t vm_allocate(map, addr, size, anywhere)
* vm_deallocate deallocates the specified range of addresses in the
* specified address map.
*/
-kern_return_t vm_deallocate(map, start, size)
- register vm_map_t map;
- vm_offset_t start;
- vm_size_t size;
+kern_return_t vm_deallocate(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_size_t size)
{
if (map == VM_MAP_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -115,11 +115,11 @@ kern_return_t vm_deallocate(map, start, size)
* vm_inherit sets the inheritance of the specified range in the
* specified map.
*/
-kern_return_t vm_inherit(map, start, size, new_inheritance)
- register vm_map_t map;
- vm_offset_t start;
- vm_size_t size;
- vm_inherit_t new_inheritance;
+kern_return_t vm_inherit(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_size_t size,
+ vm_inherit_t new_inheritance)
{
if (map == VM_MAP_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -149,12 +149,12 @@ kern_return_t vm_inherit(map, start, size, new_inheritance)
* specified map.
*/
-kern_return_t vm_protect(map, start, size, set_maximum, new_protection)
- register vm_map_t map;
- vm_offset_t start;
- vm_size_t size;
- boolean_t set_maximum;
- vm_prot_t new_protection;
+kern_return_t vm_protect(
+ vm_map_t map,
+ vm_offset_t start,
+ vm_size_t size,
+ boolean_t set_maximum,
+ vm_prot_t new_protection)
{
if ((map == VM_MAP_NULL) ||
(new_protection & ~(VM_PROT_ALL|VM_PROT_NOTIFY)))
@@ -172,9 +172,9 @@ kern_return_t vm_protect(map, start, size, set_maximum, new_protection)
set_maximum));
}
-kern_return_t vm_statistics(map, stat)
- vm_map_t map;
- vm_statistics_data_t *stat;
+kern_return_t vm_statistics(
+ vm_map_t map,
+ vm_statistics_data_t *stat)
{
if (map == VM_MAP_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -182,7 +182,7 @@ kern_return_t vm_statistics(map, stat)
*stat = vm_stat;
stat->pagesize = PAGE_SIZE;
- stat->free_count = vm_page_free_count;
+ stat->free_count = vm_page_mem_free();
stat->active_count = vm_page_active_count;
stat->inactive_count = vm_page_inactive_count;
stat->wire_count = vm_page_wire_count;
@@ -217,15 +217,13 @@ kern_return_t vm_cache_statistics(
* Handle machine-specific attributes for a mapping, such
* as cachability, migrability, etc.
*/
-kern_return_t vm_machine_attribute(map, address, size, attribute, value)
- vm_map_t map;
- vm_address_t address;
- vm_size_t size;
- vm_machine_attribute_t attribute;
- vm_machine_attribute_val_t* value; /* IN/OUT */
+kern_return_t vm_machine_attribute(
+ vm_map_t map,
+ vm_address_t address,
+ vm_size_t size,
+ vm_machine_attribute_t attribute,
+ vm_machine_attribute_val_t* value) /* IN/OUT */
{
- extern kern_return_t vm_map_machine_attribute();
-
if (map == VM_MAP_NULL)
return(KERN_INVALID_ARGUMENT);
@@ -237,12 +235,12 @@ kern_return_t vm_machine_attribute(map, address, size, attribute, value)
return vm_map_machine_attribute(map, address, size, attribute, value);
}
-kern_return_t vm_read(map, address, size, data, data_size)
- vm_map_t map;
- vm_address_t address;
- vm_size_t size;
- pointer_t *data;
- vm_size_t *data_size;
+kern_return_t vm_read(
+ vm_map_t map,
+ vm_address_t address,
+ vm_size_t size,
+ pointer_t *data,
+ vm_size_t *data_size)
{
kern_return_t error;
vm_map_copy_t ipc_address;
@@ -261,11 +259,11 @@ kern_return_t vm_read(map, address, size, data, data_size)
return(error);
}
-kern_return_t vm_write(map, address, data, size)
- vm_map_t map;
- vm_address_t address;
- pointer_t data;
- vm_size_t size;
+kern_return_t vm_write(
+ vm_map_t map,
+ vm_address_t address,
+ pointer_t data,
+ vm_size_t size)
{
if (map == VM_MAP_NULL)
return KERN_INVALID_ARGUMENT;
@@ -274,11 +272,11 @@ kern_return_t vm_write(map, address, data, size)
FALSE /* interruptible XXX */);
}
-kern_return_t vm_copy(map, source_address, size, dest_address)
- vm_map_t map;
- vm_address_t source_address;
- vm_size_t size;
- vm_address_t dest_address;
+kern_return_t vm_copy(
+ vm_map_t map,
+ vm_address_t source_address,
+ vm_size_t size,
+ vm_address_t dest_address)
{
vm_map_copy_t copy;
kern_return_t kr;
@@ -306,26 +304,19 @@ kern_return_t vm_copy(map, source_address, size, dest_address)
* Routine: vm_map
*/
kern_return_t vm_map(
- target_map,
- address, size, mask, anywhere,
- memory_object, offset,
- copy,
- cur_protection, max_protection, inheritance)
- vm_map_t target_map;
- vm_offset_t *address;
- vm_size_t size;
- vm_offset_t mask;
- boolean_t anywhere;
- ipc_port_t memory_object;
- vm_offset_t offset;
- boolean_t copy;
- vm_prot_t cur_protection;
- vm_prot_t max_protection;
- vm_inherit_t inheritance;
+ vm_map_t target_map,
+ vm_offset_t *address,
+ vm_size_t size,
+ vm_offset_t mask,
+ boolean_t anywhere,
+ ipc_port_t memory_object,
+ vm_offset_t offset,
+ boolean_t copy,
+ vm_prot_t cur_protection,
+ vm_prot_t max_protection,
+ vm_inherit_t inheritance)
{
- register
vm_object_t object;
- register
kern_return_t result;
if ((target_map == VM_MAP_NULL) ||
@@ -414,15 +405,29 @@ kern_return_t vm_map(
*
* [ To unwire the pages, specify VM_PROT_NONE. ]
*/
-kern_return_t vm_wire(host, map, start, size, access)
- host_t host;
- register vm_map_t map;
+kern_return_t vm_wire(port, map, start, size, access)
+ const ipc_port_t port;
+ vm_map_t map;
vm_offset_t start;
vm_size_t size;
vm_prot_t access;
{
- if (host == HOST_NULL)
+ boolean_t priv;
+
+ if (!IP_VALID(port))
+ return KERN_INVALID_HOST;
+
+ ip_lock(port);
+ if (!ip_active(port) ||
+ (ip_kotype(port) != IKOT_HOST_PRIV
+ && ip_kotype(port) != IKOT_HOST))
+ {
+ ip_unlock(port);
return KERN_INVALID_HOST;
+ }
+
+ priv = ip_kotype(port) == IKOT_HOST_PRIV;
+ ip_unlock(port);
if (map == VM_MAP_NULL)
return KERN_INVALID_TASK;
@@ -435,6 +440,10 @@ kern_return_t vm_wire(host, map, start, size, access)
if (projected_buffer_in_range(map, start, start+size))
return(KERN_INVALID_ARGUMENT);
+ /* TODO: make it tunable */
+ if (!priv && access != VM_PROT_NONE && map->user_wired + size > 65536)
+ return KERN_NO_ACCESS;
+
return vm_map_pageable_user(map,
trunc_page(start),
round_page(start+size),
diff --git a/xen/block.c b/xen/block.c
index 3e4ce7c6..d98b31e2 100644
--- a/xen/block.c
+++ b/xen/block.c
@@ -217,7 +217,7 @@ void hyp_block_init(void) {
sprintf(device_name, "%s%ds%d", prefix, disk, partition);
else
sprintf(device_name, "%s%d", prefix, disk);
- bd->name = (char*) kalloc(strlen(device_name));
+ bd->name = (char*) kalloc(strlen(device_name) + 1);
strcpy(bd->name, device_name);
/* Get domain id of backend driver. */
@@ -382,8 +382,8 @@ device_open (ipc_port_t reply_port, mach_msg_type_name_t reply_port_type,
port = ipc_port_alloc_kernel();
if (port == IP_NULL) {
- err = KERN_RESOURCE_SHORTAGE;
- goto out;
+ device_close(bd);
+ return KERN_RESOURCE_SHORTAGE;
}
bd->port = port;
@@ -396,7 +396,6 @@ device_open (ipc_port_t reply_port, mach_msg_type_name_t reply_port_type,
ipc_port_nsrequest (bd->port, 1, notify, &notify);
assert (notify == IP_NULL);
-out:
if (IP_VALID (reply_port))
ds_device_open_reply (reply_port, reply_port_type, D_SUCCESS, port);
else
diff --git a/xen/console.c b/xen/console.c
index 884376ff..313b9342 100644
--- a/xen/console.c
+++ b/xen/console.c
@@ -47,7 +47,7 @@ int hypputc(int c)
hyp_console_io(CONSOLEIO_write, 1, kvtolin(&d));
} else {
spl_t spl = splhigh();
- int complain;
+ static int complain;
simple_lock(&outlock);
while (hyp_ring_smash(console->out, console->out_prod, console->out_cons)) {
if (!complain) {
diff --git a/xen/console.h b/xen/console.h
index ad171a47..061ba281 100644
--- a/xen/console.h
+++ b/xen/console.h
@@ -21,7 +21,9 @@
#include <machine/xen.h>
#include <string.h>
+#include <mach/port.h>
#include <device/cons.h>
+#include <device/io_req.h>
#define hyp_console_write(str, len) hyp_console_io (CONSOLEIO_write, (len), kvtolin(str))
@@ -37,4 +39,12 @@ extern int hypcngetc(dev_t dev, int wait);
extern int hypcnprobe(struct consdev *cp);
extern int hypcninit(struct consdev *cp);
+extern int hypcnopen(dev_t dev, int flag, io_req_t ior);
+extern int hypcnread(int dev, io_req_t ior);
+extern int hypcnwrite(int dev, io_req_t ior);
+extern int hypcnclose(int dev, int flag);
+extern io_return_t hypcngetstat(dev_t dev, int flavor, int *data, unsigned int *count);
+extern io_return_t hypcnsetstat(dev_t dev, int flavor, int *data, unsigned int count);
+extern int hypcnportdeath(dev_t dev, mach_port_t port);
+
#endif /* XEN_CONSOLE_H */
diff --git a/xen/grant.c b/xen/grant.c
index 3d5c3fe7..ae3a7bfc 100644
--- a/xen/grant.c
+++ b/xen/grant.c
@@ -25,7 +25,7 @@
#include "grant.h"
#define NR_RESERVED_ENTRIES 8
-#define NR_GRANT_PAGES 4
+#define NR_GRANT_PAGES 8
decl_simple_lock_data(static,lock);
static struct grant_entry *grants;
diff --git a/xen/net.c b/xen/net.c
index fb264719..55643651 100644
--- a/xen/net.c
+++ b/xen/net.c
@@ -568,8 +568,8 @@ device_open (ipc_port_t reply_port, mach_msg_type_name_t reply_port_type,
port = ipc_port_alloc_kernel();
if (port == IP_NULL) {
- err = KERN_RESOURCE_SHORTAGE;
- goto out;
+ device_close (nd);
+ return KERN_RESOURCE_SHORTAGE;
}
nd->port = port;
@@ -582,7 +582,6 @@ device_open (ipc_port_t reply_port, mach_msg_type_name_t reply_port_type,
ipc_port_nsrequest (nd->port, 1, notify, &notify);
assert (notify == IP_NULL);
-out:
if (IP_VALID (reply_port))
ds_device_open_reply (reply_port, reply_port_type, D_SUCCESS, dev_to_port(nd));
else
diff --git a/xen/time.c b/xen/time.c
index a11e7eb4..4ebe91fa 100644
--- a/xen/time.c
+++ b/xen/time.c
@@ -34,6 +34,7 @@ static unsigned64_t lastnsec;
static unsigned64_t hyp_get_stime(void) {
unsigned32_t version;
unsigned64_t cpu_clock, last_cpu_clock, delta, system_time;
+ unsigned64_t delta_high, delta_low;
unsigned32_t mul;
signed8_t shift;
volatile struct vcpu_time_info *time = &hyp_shared_info.vcpu_info[0].time;
@@ -54,7 +55,10 @@ static unsigned64_t hyp_get_stime(void) {
delta >>= -shift;
else
delta <<= shift;
- return system_time + ((delta * (unsigned64_t) mul) >> 32);
+ delta_high = delta >> 32;
+ delta_low = (unsigned32_t) delta;
+ return system_time + ((delta_low * (unsigned64_t) mul) >> 32)
+ + (delta_high * (unsigned64_t) mul);
}
unsigned64_t hyp_get_time(void) {
@@ -107,7 +111,6 @@ static void hypclock_intr(int unit, int old_ipl, void *ret_addr, struct i386_int
}
extern struct timeval time;
-extern struct timezone tz;
int
readtodc(tp)